/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

 * This file defines functions that are interfaces to the microcode spec.
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
	for (i = 0; i < 4; i++) {
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			      (ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
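/*
 * Illustrative sketch (not part of the driver): for a 16B cipher key
 * ck = {0x00, 0x01, ..., 0x0f}, and assuming base = 4 * i on each
 * iteration (that statement is elided above), the transform yields:
 *
 *	keyx[3] <- ck[0..3]   (stored as big-endian word 0x00010203)
 *	keyx[2] <- ck[4..7]
 *	keyx[1] <- ck[8..11]
 *	keyx[0] <- ck[12..15]
 *
 * i.e. each 32-bit key word is assembled MSB-first and the word order
 * is reversed, which is the layout the SNOW3G microcode expects.
 */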
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
	uint16_t mac_len = auth->digest_length;

	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
	case RTE_CRYPTO_AUTH_NULL:
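/*
 * Usage sketch (illustrative only): reject a session whose digest
 * length does not match the algorithm before building the context,
 * e.g. SHA1 with digest_length != 20.
 *
 *	if (unlikely(cpt_mac_len_verify(&xform->auth) != 0))
 *		return -EINVAL;
 */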
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
	struct cpt_ctx *cpt_ctx = ctx;

	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		key_len = key_len / 2;
		if (unlikely(key_len == 24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
		fc_type = ZUC_SNOW3G;
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
	ctx->fc_type = fc_type;
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
	cpt_ctx->enc_cipher = 0;
	fctx->enc.enc_cipher = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
	mc_aes_type_t aes_key_type = 0;

		aes_key_type = AES_128_BIT;
		aes_key_type = AES_192_BIT;
		aes_key_type = AES_256_BIT;
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
	fctx->enc.aes_key = aes_key_type;
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	gen_key_snow3g(key, keyx);
	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
		    uint16_t key_len, uint8_t *salt)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;

	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);

	if (cpt_ctx->fc_type == FC_GEN) {
		/*
		 * We always say the IV comes from DPTR, as the user can
		 * override the IV per operation.
		 */
		fctx->enc.iv_source = CPT_FROM_DPTR;

		if (cpt_ctx->auth_key_len > 64)

		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		/* CPT performs DES using 3DES with the 8B DES key
		 * replicated 2 more times to match the 24B 3DES key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
		/* Skipping the first 8B as it will be copied
		 * in the regular code flow
		memcpy(fctx->enc.encr_key + key_len, key, key_len);
		memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);
		/* For DES3_ECB, the IV needs to come from CTX. */
		fctx->enc.iv_source = CPT_FROM_CTX;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Even though the IV source is DPTR,
		 * the AES-GCM salt is taken from CTX
		memcpy(fctx->enc.encr_iv, salt, 4);
		/* Assuming it was just a salt update
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);

	/* Only for the FC_GEN case */
		/* For GMAC auth, cipher must be NULL */
		if (cpt_ctx->hash_type != GMAC_TYPE)
			fctx->enc.enc_cipher = type;

	memcpy(fctx->enc.encr_key, key, key_len);

	cpt_ctx->enc_cipher = type;
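/*
 * Usage sketch (illustrative only): program a 128-bit AES-CBC key into
 * a zeroed context. The salt argument is only consumed by the AES-GCM
 * path above, so NULL is passed here.
 *
 *	uint8_t key[16] = { ... };
 *
 *	if (unlikely(cpt_fc_ciph_set_key(ctx, AES_CBC, key, 16, NULL)))
 *		return -EINVAL;
 */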
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     phys_addr_t dma_addr,
	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
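/*
 * Layout note (illustrative): each sg_comp_t packs four (len, ptr)
 * entries, so entry i lands in component i >> 2, slot i % 4; lengths
 * and pointers are stored big-endian for the microcode. Assuming, as
 * the callers below rely on, that the function returns the next free
 * index:
 *
 *	i = fill_sg_comp(list, 0, dma0, len0);	i is now 1
 *	i = fill_sg_comp(list, i, dma1, len1);	same sg_comp_t, slot 1
 */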
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
/*
 * This fills the MC-expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;

	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		sg_comp_t *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at the given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

				e_len = extra_offset;

				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);

			extra_len = RTE_MIN(extra_len, size);
			/* Insert the extra data ptr */
					rte_cpu_to_be_16(extra_len);
					rte_cpu_to_be_64(extra_buf->dma_addr);

			next_len = RTE_MIN(next_len, size);
			/* Insert the rest of the data */
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
			extra_offset -= size;
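/*
 * Usage sketch (illustrative only): gather `size` bytes starting at
 * offset `off` from a source IOV, with no extra buffer spliced in.
 * A non-zero residual on return means the IOV was too short by that
 * many bytes.
 *
 *	uint32_t size = data_len;
 *
 *	i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
 *				  off, &size, NULL, 0);
 *	if (unlikely(size))
 *		return;		(short by `size` bytes)
 */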
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    digest_params_t *params,
	struct cpt_request_info *req;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;
	opcode_info_t opcode;

	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;

	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);

	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;

	/* Only the NULL-auth-only case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		opcode.s.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		opcode.s.minor = 0x03;
		/* Send out the completion code only */
		vq_cmd_w0.s.param2 = 0x1;

	vq_cmd_w0.s.opcode = opcode.flags;

	/* DPTR has the SG list */

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO: Add an error check that the space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		uint64_t k_dma = params->ctx_buf.dma_addr +
			offsetof(struct cpt_ctx, auth_key);
		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));

	i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
	if (unlikely(size)) {
		CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"

	/*
	 * Looks like we need to support a zero-data
	 * gather pointer in the hash & HMAC case
	 */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
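/*
 * Layout note (illustrative): the 8-byte SG list header written above
 * is four big-endian 16-bit words. Words 0 and 1 are left zero,
 * word 2 holds the gather entry count and word 3 the scatter entry
 * count; the component arrays that follow are sized in sg_comp_t
 * groups of four entries, hence the ((i + 3) / 4) rounding.
 */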
static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support AAD and auth data
		 * given separately
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Start the cpt request info struct at an 8-byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass-through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert
		 * the AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;

	opcode.s.major = CPT_MAJOR_OP_FC;

	if (hash_type == GMAC_TYPE) {

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
		outputlen = enc_dlen + mac_len;
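/*
 * Worked example (illustrative; assumes the elided else-branch sets
 * inputlen = enc_dlen): AES-CBC + HMAC with encr_offset = 24,
 * encr_data_len = 50, auth_offset = 16, auth_data_len = 58,
 * mac_len = 20:
 *
 *	enc_dlen  = ROUNDUP16(50) + 24 = 88  (ciphertext padded to 16B)
 *	auth_dlen = 16 + 58 = 74             (74 <= 88, so else-branch)
 *	inputlen  = 88, outputlen = 88 + 20 = 108
 */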
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * On 83XX the IV and offset control word cannot be part of the
	 * instruction and must live in the data buffer, so do direct-mode
	 * processing only when headroom is available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we get to do
		 * DIRECT MODE, with limitations.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
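/*
 * Layout note (illustrative): the 8-byte offset control word packs the
 * three offsets into a big-endian 64-bit value with encr_offset in
 * bits 63:16, iv_offset in bits 15:8 and auth_offset in bits 7:0.
 * E.g. encr_offset = 24, iv_offset = 8, auth_offset = 0 gives
 * be64(0x0000000000180800).
 */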
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has the SG list */

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add an error check that the space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes the iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		size = inputlen - iv_len;
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
				i = fill_sg_comp_from_iov(gather_comp, i,
							  aad_buf, aad_offset);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output scatter list
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,

		/* Output data, or output data + digest */
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &fc_params->mac_buf);
			/* Output including the mac */
			size = outputlen - iv_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	opcode_info_t opcode;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support AAD and auth data
		 * given separately
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass-through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert
		 * the AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;

	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Start the cpt request info structure at an 8-byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_FC;

	if (hash_type == GMAC_TYPE) {

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;

	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * On 83XX the IV and offset control word cannot be part of the
	 * instruction and must live in the data buffer, so do direct-mode
	 * processing only when headroom is available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we get to do
		 * DIRECT MODE, with limitations.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* Since this is decryption,
		 * don't touch the content of the
		 * alternate completion-code space, as it contains

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has the SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add an error check that the space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes the iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;

				/* input data only */
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					i = fill_sg_comp_from_iov(gather_comp,

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

			i = fill_sg_comp_from_buf(gather_comp, i,
						  &fc_params->mac_buf);
			/* input data + mac */
			size = inputlen - iv_len;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						CPT_LOG_DP_ERR("Bad input args");

					i = fill_sg_comp_from_iov(

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output scatter list
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,

		/* Add output data */
		size = outputlen - iv_len;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					CPT_LOG_DP_ERR("Bad input args");

				i = fill_sg_comp_from_iov(scatter_comp, i,
							  fc_params->dst_iov, 0,

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* Indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	auth_data_len = AUTH_DLEN(d_lens);

	auth_offset = AUTH_OFFSET(d_offs);
	auth_offset = auth_offset / 8;

	/* consider iv len */
	auth_offset += iv_len;

	inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
	outputlen = mac_len;

	offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_data_len = ENCR_DLEN(d_lens);

	encr_offset = ENCR_OFFSET(d_offs);
	encr_offset = encr_offset / 8;
	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* iv offset is 0 */
	offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	iv_s = (flags == 0x1) ? params->auth_iv_buf :

		/*
		 * DPDK provides the IV as big-endian words in the order
		 * IV3 IV2 IV1 IV0; the MC needs IV0 IV1 IV2 IV3, so the
		 * word order is reversed here.
		 */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * On 83XX the IV and offset control word cannot be part of the
	 * instruction and must live in the data buffer, so do direct-mode
	 * processing only when headroom is available.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we get to do
		 * DIRECT MODE, with limitations.
		 */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);

		*offset_vaddr = offset_ctrl;
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		/* DPTR has the SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add an error check that the space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		size = inputlen - iv_len;

		i = fill_sg_comp_from_iov(gather_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output scatter list
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

			i = fill_sg_comp_from_buf(scatter_comp, i,
			/* Output including the mac */
			size = outputlen - iv_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;
	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* Indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	iv_s = params->iv_buf;
		/*
		 * DPDK provides the IV as big-endian words in the order
		 * IV3 IV2 IV1 IV0; the MC needs IV0 IV1 IV2 IV3, so the
		 * word order is reversed here.
		 */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;

	/*
	 * On 83XX the IV and offset control word cannot be part of the
	 * instruction and must live in the data buffer, so do direct-mode
	 * processing only when headroom is available.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we get to do
		 * DIRECT MODE, with limitations.
		 */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		/* DPTR has the SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add an error check that the space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;

		i = fill_sg_comp_from_iov(gather_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output scatter list
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

		/* Add output data */
		size = outputlen - iv_len;

		i = fill_sg_comp_from_iov(scatter_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

		iv_s = params->iv_buf;
		iv_s = params->auth_iv_buf;
		dir = iv_s[8] & 0x1;

	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* Indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;

	/* consider iv len */
		encr_offset += iv_len;
		auth_offset += iv_len;

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has the SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO: Add an error check that the space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	size = inputlen - iv_len;

	i = fill_sg_comp_from_iov(gather_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output scatter list
	 */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		i = fill_sg_comp_from_buf(scatter_comp, i,
		/* Output including the mac */
		size = outputlen - iv_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	/*
	 * Reserve the space that immediately follows the app data, so the
	 * completion code and alternate completion code fall in the same
	 * cache line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* Indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has the SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO: Add an error check that the space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;

	i = fill_sg_comp_from_iov(gather_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output scatter list
	 */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	size = outputlen - iv_len;

	i = fill_sg_comp_from_iov(scatter_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     fc_params_t *fc_params,
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);

	/*
	 * For the AUTH_ONLY case, the MC only supports digest generation;
	 * verification has to be done in software via memcmp().
	 */

static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
		/* For ZUC/SNOW3G/Kasumi */
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
		/* Kasumi ECB mode */
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;

	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		fctx->enc.enc_cipher = 0;

	fctx->enc.hash_type = cpt_ctx->hash_type = type;
	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

	memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
	memcpy(cpt_ctx->auth_key, key, key_len);
	cpt_ctx->auth_key_len = key_len;
	memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
	memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));

	memcpy(fctx->hmac.opad, key, key_len);
	fctx->enc.auth_input_type = 1;
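/*
 * Usage sketch (illustrative only): program SHA1-HMAC with a 20-byte
 * key and 20-byte digest. For the ZUC/SNOW3G/Kasumi types handled
 * above, the mac_len argument is ignored and the MAC length is fixed
 * to 4 bytes.
 *
 *	if (unlikely(cpt_fc_auth_set_key(ctx, SHA1_TYPE, key, 20, 20)))
 *		return -EINVAL;
 */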
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL cipher type */
	auth_type_t auth_type = 0; /* NULL auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	aead_form = &xform->aead;
	void *ctx = SESS_PRIV(sess);

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
		CPT_LOG_DP_ERR("Unknown aead operation");
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		cipher_key_len = 16;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = CHACHA20;
		auth_type = POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",

	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);

	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
					 aead_form->key.length, NULL)))

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
					 aead_form->digest_length)))
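/*
 * Usage sketch (illustrative only): an AES-GCM transform roughly as
 * the PMD receives it from the cryptodev layer. IV_OFFSET is a
 * hypothetical placeholder for the IV location in the crypto op.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 *
 *	ret = fill_sess_aead(&xform, sess);
 */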
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
	else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
			c_form->key.data, c_form->key.length, NULL)))
		return -1;

	return 0;
}
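
/*
 * cipher_key_len above is only the minimum for the algo; exact AES key
 * sizes (16/24/32 bytes, doubled for XTS) are validated later inside
 * cpt_fc_ciph_set_key() via cpt_fc_ciph_validate_key_aes().
 */
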
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
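
/*
 * zsk_flag recap (constants from cpt_mcode_defines.h): ZS_IA marks
 * SNOW3G/ZUC integrity, where offsets are bit-granular and an auth IV is
 * required; K_F9 marks KASUMI f9, whose direction bit is recovered from
 * the tail of the source buffer (see find_kasumif9_direction_and_length()
 * below). Zero means a plain hash/HMAC request.
 */
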
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}
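
/*
 * AES-GMAC rides on the GCM engine: the cipher side is keyed as AES_GCM
 * while the hash side is GMAC_TYPE with a NULL auth key. sess->is_gmac
 * (rather than sess->aes_gcm) is set so that fill_fc_params() still
 * refreshes the salt exactly as it does for GCM.
 */
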
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
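
/*
 * Illustrative fast path: for a single-seg mbuf with enough tailroom the
 * meta buffer is carved from the end of the mbuf's data room and the
 * returned pointer is tagged in bit 0:
 *
 *	mdata = alloc_op_meta(m_src, &buf, len, pool);
 *	if ((uintptr_t)mdata & 1)
 *		;	// lives in mbuf tailroom, no mempool_put needed
 *
 * Buffers are at least 8-byte aligned, so bit 0 is free to use as a tag.
 */
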
/**
 * free_op_meta - free meta buffer back to mempool.
 * @param mdata: pointer to the meta buffer.
 * @param cpt_meta_pool: mempool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
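
/*
 * Callers that dereference the meta buffer must clear the tag bit first;
 * fill_fc_params() does this with "(uintptr_t)mdata & (uintptr_t)~1ull"
 * before laying out the op[] scratch words.
 */
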
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* First seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
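
/*
 * Worked example (illustrative): for a 3-segment chain of 64 + 128 + 32
 * bytes and start_offset = 80, the loop consumes seg0 (64B), lands 16
 * bytes into seg1 and produces
 *
 *	bufs[0] = seg1 data + 16, size = 112
 *	bufs[1] = seg2 data,      size = 32
 *	buf_cnt = 2
 */
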
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* Single-seg case can go in-place without an SG list */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
		    (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
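
/*
 * The 24-byte head / 8-byte tail thresholds appear to be what the 83XX
 * direct-mode DMA needs around the packet; when met,
 * SINGLE_BUF_HEADTAILROOM lets fill_fc_params() use the smaller
 * m_info->lb_mlen meta length instead of m_info->sg_mlen.
 */
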
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			/*
			 * Build a 16B counter block from the 12B IV; the
			 * last word is the initial block counter of 1 in
			 * big endian.
			 */
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
				uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			/* AAD is contiguous with the data: fold it into
			 * the auth range instead of a separate buffer.
			 */
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* For gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the API for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	/* mdata may carry the tailroom tag in bit 0; clear it before use */
	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
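
/*
 * Offset/length packing used above and expected by the prep routines:
 * cipher+auth requests use
 *
 *	d_offs = (cipher_offset << 16) | auth_offset
 *	d_lens = (cipher_length << 32) | auth_length
 *
 * For AEAD with the AAD contiguous in front of the payload, the AAD is
 * folded into the auth range: the auth offset is pulled back by aad_len
 * and the auth length grows by aad_len, so the MAC covers AAD + payload
 * in a single pass.
 */
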
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
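
/*
 * gen_mac is the device-computed digest; for verify ops it is staged in
 * the meta buffer (op[2]/op[3] as set up in fill_digest_params()) and
 * compared here against the digest that came with the op.
 */
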
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	int32_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		/* Skip the zero padding bytes at the tail */
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
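
/*
 * Example, following the KASUMI f9 trailer (MESSAGE || DIRECTION || '1'
 * || zero padding): if the last non-zero byte src[i] is 0x40
 * (0b01000000), the lowest set bit is the stop bit at position 6, the
 * direction bit is bit 7, and the message length is i * 8 bits. If
 * src[i] is exactly 0x80, the direction bit spilled into the LSB of the
 * previous byte and the length is i * 8 - 1 bits.
 */
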
/*
 * This handles all auth-only requests except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest, force a mempool allocation */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	/* Mempool-allocated meta is never tailroom-tagged */
	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9; take the direction bit from
			 * the end of the source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/* Digest to be generated in separate buffer */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the API for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
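
/*
 * Meta buffer layout assembled above for the verify direction:
 *
 *	op[0..3]  : 4 scratch words (mdata, cop, mac vaddr, mac len)
 *	mac_buf   : mac_len bytes, rounded up to 8, written by hardware
 *	meta_buf  : the remainder, handed to cpt_fc_enc_hmac_prep()
 */
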
#endif /* _CPT_UCODE_H_ */