/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

/*
 * This file defines the functions that form the interface to the
 * microcode spec.
 */

static uint8_t zuc_d[32] = {
    0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
    0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
    0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
    0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};

static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
    int i, base;

    for (i = 0; i < 4; i++) {
        base = 4 * i;
        keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
            (ck[base + 2] << 8) | (ck[base + 3]);
        keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
    }
}
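
/*
 * Illustrative note (not from the original source): the SNOW3G microcode
 * expects the 128-bit cipher key as four 32-bit words in reversed word
 * order, each stored big-endian. For a key whose first four bytes are
 * 0x00 0x11 0x22 0x33, keyx[3] ends up holding the big-endian encoding
 * of 0x00112233:
 *
 *     uint32_t keyx[4];
 *     gen_key_snow3g(ck, keyx);  // keyx[3] <- ck[0..3], keyx[0] <- ck[12..15]
 */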

static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
    uint16_t mac_len = auth->digest_length;
    int ret;

    switch (auth->algo) {
    case RTE_CRYPTO_AUTH_MD5:
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        ret = (mac_len == 16) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_SHA1:
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        ret = (mac_len == 20) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_SHA224:
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
        ret = (mac_len == 28) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_SHA256:
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        ret = (mac_len == 32) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_SHA384:
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        ret = (mac_len == 48) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_SHA512:
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        ret = (mac_len == 64) ? 0 : -1;
        break;
    case RTE_CRYPTO_AUTH_NULL:
        ret = 0;
        break;
    default:
        ret = -1;
    }

    return ret;
}
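
/*
 * Usage sketch (illustrative only): session setup is expected to reject
 * digest lengths that do not match the full digest size of the chosen
 * algorithm, e.g.:
 *
 *     if (unlikely(cpt_mac_len_verify(&xform->auth) != 0))
 *         return -1;
 */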

static __rte_always_inline void
cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
           uint8_t *salt)
{
    mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;

    memcpy(fctx->enc.encr_iv, salt, 4);
}
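
/*
 * For AES-GCM the 4-byte salt occupies the leading bytes of the context
 * IV field (see the AES_GCM case in cpt_fc_ciph_set_key()), so a salt
 * change is a small context update and does not require reprogramming
 * the cipher key.
 */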

static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
{
    switch (key_len) {
    case 16:
    case 24:
    case 32:
        return 0;
    default:
        return -1;
    }
}

static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
    int fc_type = 0;

    switch (type) {
    case PASSTHROUGH:
        fc_type = FC_GEN;
        break;
    case DES3_CBC:
    case DES3_ECB:
        fc_type = FC_GEN;
        break;
    case AES_CBC:
    case AES_ECB:
    case AES_CTR:
    case AES_GCM:
        if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
            return -1;
        fc_type = FC_GEN;
        break;
    case CHACHA20:
        fc_type = FC_GEN;
        break;
    case AES_XTS:
        key_len = key_len / 2;
        if (unlikely(key_len == 24)) {
            CPT_LOG_DP_ERR("Invalid AES key len for XTS");
            return -1;
        }
        if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
            return -1;
        fc_type = FC_GEN;
        break;
    case ZUC_EEA3:
    case SNOW3G_UEA2:
        if (unlikely(key_len != 16))
            return -1;
        /* No support for AEAD yet */
        if (unlikely(ctx->hash_type))
            return -1;
        fc_type = ZUC_SNOW3G;
        break;
    case KASUMI_F8_CBC:
    case KASUMI_F8_ECB:
        if (unlikely(key_len != 16))
            return -1;
        /* No support for AEAD yet */
        if (unlikely(ctx->hash_type))
            return -1;
        fc_type = KASUMI;
        break;
    default:
        return -1;
    }

    ctx->fc_type = fc_type;
    return 0;
}
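
/*
 * Summary (descriptive): ciphers fall into three microcode groups:
 * FC_GEN for the AES/DES flexi-crypto path, ZUC_SNOW3G for EEA3/UEA2,
 * and KASUMI for F8. The chosen group is recorded in ctx->fc_type for
 * use when requests are prepared later.
 */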

static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
    cpt_ctx->enc_cipher = 0;
    fctx->enc.enc_cipher = 0;
}

static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
{
    mc_aes_type_t aes_key_type = 0;

    switch (key_len) {
    case 16:
        aes_key_type = AES_128_BIT;
        break;
    case 24:
        aes_key_type = AES_192_BIT;
        break;
    case 32:
        aes_key_type = AES_256_BIT;
        break;
    default:
        /* This should not happen */
        CPT_LOG_DP_ERR("Invalid AES key len");
        return;
    }

    fctx->enc.aes_key = aes_key_type;
}

static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
                uint16_t key_len)
{
    mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
    uint32_t keyx[4];

    cpt_ctx->snow3g = 1;
    gen_key_snow3g(key, keyx);
    memcpy(zs_ctx->ci_key, keyx, key_len);
    cpt_ctx->zsk_flags = 0;
}

static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
                uint16_t key_len)
{
    mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;

    cpt_ctx->snow3g = 0;
    memcpy(zs_ctx->ci_key, key, key_len);
    memcpy(zs_ctx->zuc_const, zuc_d, 32);
    cpt_ctx->zsk_flags = 0;
}

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
                uint16_t key_len)
{
    mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

    cpt_ctx->k_ecb = 1;
    memcpy(k_ctx->ci_key, key, key_len);
    cpt_ctx->zsk_flags = 0;
}

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
                uint16_t key_len)
{
    mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

    memcpy(k_ctx->ci_key, key, key_len);
    cpt_ctx->zsk_flags = 0;
}

static __rte_always_inline int
cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
            const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
    mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
    int ret;

    ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
    if (unlikely(ret))
        return -1;

    if (cpt_ctx->fc_type == FC_GEN) {
        /*
         * We need to always say the IV is from DPTR, as the user can
         * sometimes override the IV per operation.
         */
        fctx->enc.iv_source = CPT_FROM_DPTR;

        if (cpt_ctx->auth_key_len > 64)
            return -1;
    }

    switch (type) {
    case PASSTHROUGH:
        cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
        goto success;
    case DES3_CBC:
        /* CPT performs DES using 3DES with the 8B DES-key
         * replicated 2 more times to match the 24B 3DES-key.
         * E.g., if the original key is "0x0a 0x0b", the new key is
         * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
         */
        if (key_len == 8) {
            /* Skipping the first 8B as it will be copied
             * in the regular code flow
             */
            memcpy(fctx->enc.encr_key+key_len, key, key_len);
            memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
        }
        break;
    case DES3_ECB:
        /* For DES3_ECB the IV needs to be from CTX. */
        fctx->enc.iv_source = CPT_FROM_CTX;
        break;
    case AES_CBC:
    case AES_ECB:
    case AES_CTR:
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
        break;
    case AES_GCM:
        /* Even though the IV source is from DPTR, the
         * AES-GCM salt is taken from the CTX.
         */
        if (salt) {
            memcpy(fctx->enc.encr_iv, salt, 4);
            /* Assuming it was just a salt update
             * and nothing else
             */
            if (!key)
                goto success;
        }
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
        break;
    case AES_XTS:
        key_len = key_len / 2;
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

        /* Copy key2 for XTS into ipad */
        memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
        memcpy(fctx->hmac.ipad, &key[key_len], key_len);
        break;
    case CHACHA20:
        break;
    case SNOW3G_UEA2:
        cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
        goto success;
    case ZUC_EEA3:
        cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
        goto success;
    case KASUMI_F8_ECB:
        cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
        goto success;
    case KASUMI_F8_CBC:
        cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
        goto success;
    default:
        return -1;
    }

    /* Only for the FC_GEN case */

    /* For GMAC auth, cipher must be NULL */
    if (cpt_ctx->hash_type != GMAC_TYPE)
        fctx->enc.enc_cipher = type;

    memcpy(fctx->enc.encr_key, key, key_len);

success:
    cpt_ctx->enc_cipher = type;

    return 0;
}
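
/*
 * Usage sketch (illustrative, not from the original file): configuring an
 * AES-XTS session passes the concatenated key1||key2 blob; the helper
 * halves key_len, programs key1 as the cipher key and stashes key2 in the
 * ipad field of the flexi context:
 *
 *     uint8_t xts_key[32];  // hypothetical: key1 (16B) followed by key2 (16B)
 *     if (cpt_fc_ciph_set_key(ctx, AES_XTS, xts_key, 32, NULL))
 *         return -1;
 */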

static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
         uint32_t i,
         phys_addr_t dma_addr,
         uint32_t size)
{
    sg_comp_t *to = &list[i >> 2];

    to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
    to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
    return ++i;
}
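
/*
 * Layout note (descriptive): each sg_comp_t packs four entries, four
 * big-endian 16-bit lengths followed by four big-endian 64-bit pointers,
 * so entry i lands in component i/4 at slot i%4. A gather list of, say,
 * three buffers therefore occupies a single sg_comp_t.
 */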

static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
              uint32_t i,
              buf_ptr_t *from)
{
    sg_comp_t *to = &list[i >> 2];

    to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
    to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
    return ++i;
}

static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
              uint32_t i,
              buf_ptr_t *from,
              uint32_t *psize)
{
    sg_comp_t *to = &list[i >> 2];
    uint32_t size = *psize;
    uint32_t e_len;

    e_len = (size > from->size) ? from->size : size;
    to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
    to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
    *psize -= e_len;
    return ++i;
}

/*
 * This fills the MC-expected SGIO list from the IOV given by the user,
 * optionally splicing in an extra buffer (e.g. AAD) at extra_offset
 * bytes into the source data.
 */
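
/*
 * Illustrative example: with a 100B source IOV, extra_offset = 20 and a
 * 16B extra_buf, the resulting gather list is [src bytes 0..20),
 * [extra_buf, 16B], [src bytes 20..100); the extra buffer is inserted
 * without copying the surrounding payload.
 */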
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
              uint32_t i,
              iov_ptr_t *from, uint32_t from_offset,
              uint32_t *psize, buf_ptr_t *extra_buf,
              uint32_t extra_offset)
{
    int32_t j;
    uint32_t extra_len = extra_buf ? extra_buf->size : 0;
    uint32_t size = *psize;
    buf_ptr_t *bufs;

    bufs = from->bufs;
    for (j = 0; (j < from->buf_cnt) && size; j++) {
        phys_addr_t e_dma_addr;
        uint32_t e_len;
        sg_comp_t *to = &list[i >> 2];

        if (unlikely(from_offset)) {
            if (from_offset >= bufs[j].size) {
                from_offset -= bufs[j].size;
                continue;
            }
            e_dma_addr = bufs[j].dma_addr + from_offset;
            e_len = (size > (bufs[j].size - from_offset)) ?
                (bufs[j].size - from_offset) : size;
            from_offset = 0;
        } else {
            e_dma_addr = bufs[j].dma_addr;
            e_len = (size > bufs[j].size) ?
                bufs[j].size : size;
        }

        to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
        to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

        if (extra_len && (e_len >= extra_offset)) {
            /* Break the data at the given offset */
            uint32_t next_len = e_len - extra_offset;
            phys_addr_t next_dma = e_dma_addr + extra_offset;

            if (!extra_offset) {
                i--;
            } else {
                e_len = extra_offset;
                size -= e_len;
                to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
            }

            extra_len = RTE_MIN(extra_len, size);
            /* Insert extra data ptr */
            if (extra_len) {
                i++;
                to = &list[i >> 2];
                to->u.s.len[i % 4] =
                    rte_cpu_to_be_16(extra_len);
                to->ptr[i % 4] =
                    rte_cpu_to_be_64(extra_buf->dma_addr);
                size -= extra_len;
            }

            next_len = RTE_MIN(next_len, size);
            /* Insert the rest of the data */
            if (next_len) {
                i++;
                to = &list[i >> 2];
                to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
                to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
                size -= next_len;
            }

            extra_len = 0;
        } else {
            size -= e_len;
        }

        if (extra_offset)
            extra_offset -= size;

        i++;
    }

    *psize = size;
    return (uint32_t)i;
}

static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
            uint64_t d_lens,
            digest_params_t *params,
            void *op,
            void **prep_req)
{
    struct cpt_request_info *req;
    uint32_t size, i;
    uint16_t data_len, mac_len, key_len;
    auth_type_t hash_type;
    buf_ptr_t *meta_p;
    struct cpt_ctx *ctx;
    sg_comp_t *gather_comp;
    sg_comp_t *scatter_comp;
    uint8_t *in_buffer;
    uint32_t g_size_bytes, s_size_bytes;
    uint64_t dptr_dma, rptr_dma;
    vq_cmd_word0_t vq_cmd_w0;
    void *c_vaddr, *m_vaddr;
    uint64_t c_dma, m_dma;
    opcode_info_t opcode;

    ctx = params->ctx_buf.vaddr;
    meta_p = &params->meta_buf;

    m_vaddr = meta_p->vaddr;
    m_dma = meta_p->dma_addr;

    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;
    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    req = (struct cpt_request_info *)m_vaddr;
    size = sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    hash_type = ctx->hash_type;
    mac_len = ctx->mac_len;
    key_len = ctx->auth_key_len;
    data_len = AUTH_DLEN(d_lens);

    vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
    if (ctx->hmac) {
        opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
        vq_cmd_w0.s.param1 = key_len;
        vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
    } else {
        opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
        vq_cmd_w0.s.param1 = 0;
        vq_cmd_w0.s.dlen = data_len;
    }

    opcode.s.minor = 0;

    /* Only the NULL-auth case enters this branch */
    if (unlikely(!hash_type && !ctx->enc_cipher)) {
        opcode.s.major = CPT_MAJOR_OP_MISC;
        /* Minor op is passthrough */
        opcode.s.minor = 0x03;
        /* Send out completion code only */
        vq_cmd_w0.s.param2 = 0x1;
    }

    vq_cmd_w0.s.opcode = opcode.flags;

    /* DPTR has SG list */
    in_buffer = m_vaddr;
    dptr_dma = m_dma;

    ((uint16_t *)in_buffer)[0] = 0;
    ((uint16_t *)in_buffer)[1] = 0;
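
    /*
     * Descriptive note: the 8-byte SG header ahead of the components is
     * four big-endian 16-bit words; words 0 and 1 are reserved (zeroed
     * here), word 2 carries the gather entry count and word 3 the scatter
     * entry count, filled in once the lists are built below.
     */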

    /* TODO: add an error check that the space is sufficient */
    gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

    /*
     * Input Gather List
     */
    i = 0;

    if (ctx->hmac) {
        uint64_t k_dma = params->ctx_buf.dma_addr +
            offsetof(struct cpt_ctx, auth_key);
        /* Key */
        i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
    }

    /* Input data */
    size = data_len;
    if (size) {
        i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
                      0, &size, NULL, 0);
        if (unlikely(size)) {
            CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
                     " by %d bytes", size);
            return;
        }
    } else {
        /*
         * Zero-length data still needs a gather pointer in the
         * hash & HMAC case.
         */
        i++;
    }
    ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
    g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    /*
     * Output Scatter List
     */
    i = 0;
    scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

    if (flags & VALID_MAC_BUF) {
        if (unlikely(params->mac_buf.size < mac_len)) {
            CPT_LOG_DP_ERR("Insufficient MAC size");
            return;
        }

        size = mac_len;
        i = fill_sg_comp_from_buf_min(scatter_comp, i,
                          &params->mac_buf, &size);
    } else {
        size = mac_len;
        i = fill_sg_comp_from_iov(scatter_comp, i,
                      params->src_iov, data_len,
                      &size, NULL, 0);
        if (unlikely(size)) {
            CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
                       " %d bytes", size);
            return;
        }
    }
    ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
    s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

    /* This is the DPTR len in case of SG mode */
    vq_cmd_w0.s.dlen = size;

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* cpt alternate completion address saved earlier */
    req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
    *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
    rptr_dma = c_dma - 8;

    req->ist.ei1 = dptr_dma;
    req->ist.ei2 = rptr_dma;

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
          uint64_t d_offs,
          uint64_t d_lens,
          fc_params_t *fc_params,
          void *op,
          void **prep_req)
{
    uint32_t iv_offset = 0;
    int32_t inputlen, outputlen, enc_dlen, auth_dlen;
    struct cpt_ctx *cpt_ctx;
    uint32_t cipher_type, hash_type;
    uint32_t mac_len, size;
    uint8_t iv_len = 16;
    struct cpt_request_info *req;
    buf_ptr_t *meta_p, *aad_buf = NULL;
    uint32_t encr_offset, auth_offset;
    uint32_t encr_data_len, auth_data_len, aad_len = 0;
    uint32_t passthrough_len = 0;
    void *m_vaddr, *offset_vaddr;
    uint64_t m_dma, offset_dma;
    vq_cmd_word0_t vq_cmd_w0;
    void *c_vaddr;
    uint64_t c_dma;
    opcode_info_t opcode;

    meta_p = &fc_params->meta_buf;
    m_vaddr = meta_p->vaddr;
    m_dma = meta_p->dma_addr;

    encr_offset = ENCR_OFFSET(d_offs);
    auth_offset = AUTH_OFFSET(d_offs);
    encr_data_len = ENCR_DLEN(d_lens);
    auth_data_len = AUTH_DLEN(d_lens);
    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * We don't support AAD and auth data given separately.
         */
        auth_data_len = 0;
        auth_offset = 0;
        aad_len = fc_params->aad_buf.size;
        aad_buf = &fc_params->aad_buf;
    }
    cpt_ctx = fc_params->ctx_buf.vaddr;
    cipher_type = cpt_ctx->enc_cipher;
    hash_type = cpt_ctx->hash_type;
    mac_len = cpt_ctx->mac_len;

    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Start the cpt request info struct at an 8-byte boundary */
    size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
        (uint8_t *)m_vaddr;

    req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

    size += sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    if (unlikely(!(flags & VALID_IV_BUF))) {
        iv_len = 0;
        iv_offset = ENCR_IV_OFFSET(d_offs);
    }

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * When AAD is given, data above encr_offset is pass through.
         * Since AAD is given as a separate pointer and not as an
         * offset, this is a special case: the input data is split
         * into passthrough + encr_data, and the AAD is inserted
         * in between.
         */
        if (hash_type != GMAC_TYPE) {
            passthrough_len = encr_offset;
            auth_offset = passthrough_len + iv_len;
            encr_offset = passthrough_len + aad_len + iv_len;
            auth_data_len = aad_len + encr_data_len;
        } else {
            passthrough_len = 16 + aad_len;
            auth_offset = passthrough_len + iv_len;
            auth_data_len = aad_len;
        }
    } else {
        encr_offset += iv_len;
        auth_offset += iv_len;
    }

    /* Encryption */
    opcode.s.major = CPT_MAJOR_OP_FC;
    opcode.s.minor = 0;

    if (hash_type == GMAC_TYPE) {
        encr_offset = 0;
        encr_data_len = 0;
    }

    auth_dlen = auth_offset + auth_data_len;
    enc_dlen = encr_data_len + encr_offset;
    if (unlikely(encr_data_len & 0xf)) {
        if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
            enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
        else if (likely((cipher_type == AES_CBC) ||
                (cipher_type == AES_ECB)))
            enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
    }

    if (unlikely(auth_dlen > enc_dlen)) {
        inputlen = auth_dlen;
        outputlen = auth_dlen + mac_len;
    } else {
        inputlen = enc_dlen;
        outputlen = enc_dlen + mac_len;
    }
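
    /*
     * Note: when the auth region extends past the cipher region, the
     * request is sized by auth_dlen and the MAC is appended after it;
     * enc_dlen was rounded up to the block size above so the microcode
     * always operates on whole cipher blocks.
     */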

    /* GP op header */
    vq_cmd_w0.s.param1 = encr_data_len;
    vq_cmd_w0.s.param2 = auth_data_len;
    /*
     * On 83XX the IV and the offset control word cannot be carried in
     * the instruction and must be part of the data buffer, so Direct
     * mode processing is done only when enough headroom is available.
     */
    if (likely((flags & SINGLE_BUF_INPLACE) &&
           (flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = fc_params->bufs[0].vaddr;
        uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
        /*
         * These flags indicate that 24 bytes of headroom and 8 bytes
         * of tailroom are available, so the (limited) DIRECT mode
         * can be used.
         */

        offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

        /* DPTR */
        req->ist.ei1 = offset_dma;
        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;
        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
                            + outputlen - iv_len);
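
        /*
         * Note: the output overwrites the buffer in place starting at
         * the IV position, so RPTR backs up by iv_len but not by the
         * offset control word; the alternate completion code is placed
         * right after the output.
         */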

        vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
                              + OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));
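
        /*
         * The offset control word packs, in the low 32 bits (MSB to
         * LSB): encr_offset (16 bits), iv_offset (8 bits) and
         * auth_offset (8 bits), stored big-endian for the microcode.
         */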
    } else {
        uint32_t i, g_size_bytes, s_size_bytes;
        uint64_t dptr_dma, rptr_dma;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;

        /* This falls under strict SG mode */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;
        size = OFF_CTRL_LEN + iv_len;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
                              + OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO: add an error check that the space is sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word that includes iv */
        i = fill_sg_comp(gather_comp, i, offset_dma,
                 OFF_CTRL_LEN + iv_len);

        /* Add input data */
        size = inputlen - iv_len;
        if (size) {
            uint32_t aad_offset = aad_len ? passthrough_len : 0;

            if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                i = fill_sg_comp_from_buf_min(gather_comp, i,
                                  fc_params->bufs,
                                  &size);
            } else {
                i = fill_sg_comp_from_iov(gather_comp, i,
                              fc_params->src_iov,
                              0, &size,
                              aad_buf, aad_offset);
            }

            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter list
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* Add IV */
        if (likely(iv_len)) {
            i = fill_sg_comp(scatter_comp, i,
                     offset_dma + OFF_CTRL_LEN,
                     iv_len);
        }

        /* Output data or output data + digest */
        if (unlikely(flags & VALID_MAC_BUF)) {
            size = outputlen - iv_len - mac_len;
            if (size) {
                uint32_t aad_offset =
                    aad_len ? passthrough_len : 0;

                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            scatter_comp,
                            i,
                            fc_params->bufs,
                            &size);
                } else {
                    i = fill_sg_comp_from_iov(scatter_comp,
                            i,
                            fc_params->dst_iov,
                            0,
                            &size,
                            aad_buf,
                            aad_offset);
                }
                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer"
                               " space, size %d needed",
                               size);
                    return;
                }
            }
            /* mac data */
            if (mac_len) {
                i = fill_sg_comp_from_buf(scatter_comp, i,
                              &fc_params->mac_buf);
            }
        } else {
            /* Output including mac */
            size = outputlen - iv_len;
            if (size) {
                uint32_t aad_offset =
                    aad_len ? passthrough_len : 0;

                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            scatter_comp,
                            i,
                            fc_params->bufs,
                            &size);
                } else {
                    i = fill_sg_comp_from_iov(scatter_comp,
                            i,
                            fc_params->dst_iov,
                            0,
                            &size,
                            aad_buf,
                            aad_offset);
                }
                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer"
                               " space, size %d needed",
                               size);
                    return;
                }
            }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is the DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = size;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
          uint64_t d_offs,
          uint64_t d_lens,
          fc_params_t *fc_params,
          void *op,
          void **prep_req)
{
    uint32_t iv_offset = 0, size;
    int32_t inputlen, outputlen, enc_dlen, auth_dlen;
    struct cpt_ctx *cpt_ctx;
    int32_t hash_type, mac_len;
    uint8_t iv_len = 16;
    struct cpt_request_info *req;
    buf_ptr_t *meta_p, *aad_buf = NULL;
    uint32_t encr_offset, auth_offset;
    uint32_t encr_data_len, auth_data_len, aad_len = 0;
    uint32_t passthrough_len = 0;
    void *m_vaddr, *offset_vaddr;
    uint64_t m_dma, offset_dma;
    opcode_info_t opcode;
    vq_cmd_word0_t vq_cmd_w0;
    void *c_vaddr;
    uint64_t c_dma;

    meta_p = &fc_params->meta_buf;
    m_vaddr = meta_p->vaddr;
    m_dma = meta_p->dma_addr;

    encr_offset = ENCR_OFFSET(d_offs);
    auth_offset = AUTH_OFFSET(d_offs);
    encr_data_len = ENCR_DLEN(d_lens);
    auth_data_len = AUTH_DLEN(d_lens);

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * We don't support AAD and auth data given separately.
         */
        auth_data_len = 0;
        auth_offset = 0;
        aad_len = fc_params->aad_buf.size;
        aad_buf = &fc_params->aad_buf;
    }

    cpt_ctx = fc_params->ctx_buf.vaddr;
    hash_type = cpt_ctx->hash_type;
    mac_len = cpt_ctx->mac_len;

    if (unlikely(!(flags & VALID_IV_BUF))) {
        iv_len = 0;
        iv_offset = ENCR_IV_OFFSET(d_offs);
    }

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * When AAD is given, data above encr_offset is pass through.
         * Since AAD is given as a separate pointer and not as an
         * offset, this is a special case: the input data is split
         * into passthrough + encr_data, and the AAD is inserted
         * in between.
         */
        if (hash_type != GMAC_TYPE) {
            passthrough_len = encr_offset;
            auth_offset = passthrough_len + iv_len;
            encr_offset = passthrough_len + aad_len + iv_len;
            auth_data_len = aad_len + encr_data_len;
        } else {
            passthrough_len = 16 + aad_len;
            auth_offset = passthrough_len + iv_len;
            auth_data_len = aad_len;
        }
    } else {
        encr_offset += iv_len;
        auth_offset += iv_len;
    }

    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;
    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Start the cpt request info structure at an 8-byte alignment */
    size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
        (uint8_t *)m_vaddr;

    req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

    size += sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Decryption */
    opcode.s.major = CPT_MAJOR_OP_FC;
    opcode.s.minor = 1;

    if (hash_type == GMAC_TYPE) {
        encr_offset = 0;
        encr_data_len = 0;
    }

    enc_dlen = encr_offset + encr_data_len;
    auth_dlen = auth_offset + auth_data_len;

    if (auth_dlen > enc_dlen) {
        inputlen = auth_dlen + mac_len;
        outputlen = auth_dlen;
    } else {
        inputlen = enc_dlen + mac_len;
        outputlen = enc_dlen;
    }
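
    /*
     * Note: decryption consumes the MAC along with the data, so inputlen
     * includes mac_len while outputlen (the plaintext) does not; this is
     * the mirror image of the encrypt path above.
     */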

    /* GP op header */
    vq_cmd_w0.s.param1 = encr_data_len;
    vq_cmd_w0.s.param2 = auth_data_len;

    /*
     * On 83XX the IV and the offset control word cannot be carried in
     * the instruction and must be part of the data buffer, so Direct
     * mode processing is done only when enough headroom is available.
     */
    if (likely((flags & SINGLE_BUF_INPLACE) &&
           (flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = fc_params->bufs[0].vaddr;
        uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
        /*
         * These flags indicate that 24 bytes of headroom and 8 bytes
         * of tailroom are available, so the (limited) DIRECT mode
         * can be used.
         */

        offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
        req->ist.ei1 = offset_dma;

        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;

        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
                            outputlen - iv_len);
        /*
         * Since this is decryption, don't touch the alternate
         * completion-code space: it contains the hash.
         */

        vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                              OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));
    } else {
        uint64_t dptr_dma, rptr_dma;
        uint32_t g_size_bytes, s_size_bytes;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;
        uint32_t i = 0;

        /* This falls under strict SG mode */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;
        size = OFF_CTRL_LEN + iv_len;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                              OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO: add an error check that the space is sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word that includes iv */
        i = fill_sg_comp(gather_comp, i, offset_dma,
                 OFF_CTRL_LEN + iv_len);

        /* Add input data */
        if (flags & VALID_MAC_BUF) {
            size = inputlen - iv_len - mac_len;
            if (size) {
                /* input data only */
                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            gather_comp, i,
                            fc_params->bufs,
                            &size);
                } else {
                    uint32_t aad_offset = aad_len ?
                        passthrough_len : 0;

                    i = fill_sg_comp_from_iov(gather_comp,
                            i,
                            fc_params->src_iov,
                            0, &size,
                            aad_buf,
                            aad_offset);
                }
                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer"
                               " space, size %d needed",
                               size);
                    return;
                }
            }

            /* mac data */
            if (mac_len) {
                i = fill_sg_comp_from_buf(gather_comp, i,
                              &fc_params->mac_buf);
            }
        } else {
            /* input data + mac */
            size = inputlen - iv_len;
            if (size) {
                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            gather_comp, i,
                            fc_params->bufs,
                            &size);
                } else {
                    uint32_t aad_offset = aad_len ?
                        passthrough_len : 0;

                    if (unlikely(!fc_params->src_iov)) {
                        CPT_LOG_DP_ERR("Bad input args");
                        return;
                    }

                    i = fill_sg_comp_from_iov(
                            gather_comp, i,
                            fc_params->src_iov,
                            0, &size,
                            aad_buf,
                            aad_offset);
                }

                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer"
                               " space, size %d needed",
                               size);
                    return;
                }
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* Add iv */
        if (iv_len) {
            i = fill_sg_comp(scatter_comp, i,
                     offset_dma + OFF_CTRL_LEN,
                     iv_len);
        }

        /* Add output data */
        size = outputlen - iv_len;
        if (size) {
            if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                /* handle single buffer here */
                i = fill_sg_comp_from_buf_min(scatter_comp, i,
                                  fc_params->bufs,
                                  &size);
            } else {
                uint32_t aad_offset = aad_len ?
                    passthrough_len : 0;

                if (unlikely(!fc_params->dst_iov)) {
                    CPT_LOG_DP_ERR("Bad input args");
                    return;
                }

                i = fill_sg_comp_from_iov(scatter_comp, i,
                              fc_params->dst_iov, 0,
                              &size, aad_buf,
                              aad_offset);
            }

            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is the DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = size;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;
        size += COMPLETION_CODE_SIZE;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
            uint64_t d_offs,
            uint64_t d_lens,
            fc_params_t *params,
            void *op,
            void **prep_req)
{
    uint32_t size;
    int32_t inputlen, outputlen;
    struct cpt_ctx *cpt_ctx;
    uint32_t mac_len = 0;
    uint8_t snow3g;
    struct cpt_request_info *req;
    buf_ptr_t *buf_p;
    uint32_t encr_offset = 0, auth_offset = 0;
    uint32_t encr_data_len = 0, auth_data_len = 0;
    int flags, iv_len = 16;
    void *m_vaddr, *c_vaddr;
    uint64_t m_dma, c_dma, offset_ctrl;
    uint64_t *offset_vaddr, offset_dma;
    uint32_t *iv_s, iv[4], j;
    vq_cmd_word0_t vq_cmd_w0;
    opcode_info_t opcode;

    buf_p = &params->meta_buf;
    m_vaddr = buf_p->vaddr;
    m_dma = buf_p->dma_addr;

    cpt_ctx = params->ctx_buf.vaddr;
    flags = cpt_ctx->zsk_flags;
    mac_len = cpt_ctx->mac_len;
    snow3g = cpt_ctx->snow3g;

    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Reserve memory for cpt request info */
    req = (struct cpt_request_info *)m_vaddr;

    size = sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

    /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

    opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
              (0 << 3) | (flags & 0x7));

    if (flags == 0x1) {
        /*
         * Microcode expects offsets in bytes
         * TODO: Rounding off
         */
        auth_data_len = AUTH_DLEN(d_lens);

        /* EIA3 or UIA2 */
        auth_offset = AUTH_OFFSET(d_offs);
        auth_offset = auth_offset / 8;

        /* consider iv len */
        auth_offset += iv_len;

        inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
        outputlen = mac_len;

        /* iv offset is 0 */
        offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
    } else {
        /*
         * Microcode expects offsets in bytes
         * TODO: Rounding off
         */
        encr_data_len = ENCR_DLEN(d_lens);

        /* EEA3 or UEA2 */
        encr_offset = ENCR_OFFSET(d_offs);
        encr_offset = encr_offset / 8;
        /* consider iv len */
        encr_offset += iv_len;

        inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
        outputlen = inputlen;

        /* iv offset is 0 */
        offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
    }

    iv_s = (flags == 0x1) ? params->auth_iv_buf :
        params->iv_buf;

    if (snow3g) {
        /*
         * DPDK provides the IV as IV3 IV2 IV1 IV0, big-endian;
         * the MC needs it as IV0 IV1 IV2 IV3.
         */
        for (j = 0; j < 4; j++)
            iv[j] = iv_s[3 - j];
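        /*
         * Example: an IV supplied as words {w0, w1, w2, w3} is
         * handed to the microcode as {w3, w2, w1, w0}.
         */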
    } else {
        /* ZUC doesn't need a swap */
        for (j = 0; j < 4; j++)
            iv[j] = iv_s[j];
    }

    /*
     * GP op header, lengths are expected in bits.
     */
    vq_cmd_w0.s.param1 = encr_data_len;
    vq_cmd_w0.s.param2 = auth_data_len;

    /*
     * On 83XX the IV and the offset control word cannot be carried in
     * the instruction and must be part of the data buffer, so Direct
     * mode processing is done only when enough headroom is available.
     */
    if (likely((req_flags & SINGLE_BUF_INPLACE) &&
           (req_flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = params->bufs[0].vaddr;
        uint64_t dm_dma_addr = params->bufs[0].dma_addr;
        /*
         * These flags indicate that 24 bytes of headroom and 8 bytes
         * of tailroom are available, so the (limited) DIRECT mode
         * can be used.
         */

        offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
                        OFF_CTRL_LEN - iv_len);
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

        /* DPTR */
        req->ist.ei1 = offset_dma;
        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;
        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
                            + outputlen - iv_len);

        vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
                              + OFF_CTRL_LEN);
            memcpy(iv_d, iv, 16);
        }

        *offset_vaddr = offset_ctrl;
    } else {
        uint32_t i, g_size_bytes, s_size_bytes;
        uint64_t dptr_dma, rptr_dma;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;
        uint32_t *iv_d;

        /* save space for iv */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;

        m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
        m_dma += OFF_CTRL_LEN + iv_len;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = opcode.flags;

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO: add an error check that the space is sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word followed by iv */

        i = fill_sg_comp(gather_comp, i, offset_dma,
                 OFF_CTRL_LEN + iv_len);

        /* iv offset is 0 */
        *offset_vaddr = offset_ctrl;

        iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
        memcpy(iv_d, iv, 16);

        /* Add input data */
        size = inputlen - iv_len;
        if (size) {
            i = fill_sg_comp_from_iov(gather_comp, i,
                          params->src_iov,
                          0, &size, NULL, 0);
            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* IV in SLIST only for EEA3 & UEA2 */
        if (flags == 0x0) {
            i = fill_sg_comp(scatter_comp, i,
                     offset_dma + OFF_CTRL_LEN, iv_len);
        }

        /* Add output data */
        if (req_flags & VALID_MAC_BUF) {
            size = outputlen - iv_len - mac_len;
            if (size) {
                i = fill_sg_comp_from_iov(scatter_comp, i,
                              params->dst_iov, 0,
                              &size, NULL, 0);
                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer space,"
                               " size %d needed", size);
                    return;
                }
            }

            /* mac data */
            if (mac_len) {
                i = fill_sg_comp_from_buf(scatter_comp, i,
                              &params->mac_buf);
            }
        } else {
            /* Output including mac */
            size = outputlen - iv_len;
            if (size) {
                i = fill_sg_comp_from_iov(scatter_comp, i,
                              params->dst_iov, 0,
                              &size, NULL, 0);
                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer space,"
                               " size %d needed", size);
                    return;
                }
            }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is the DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = size;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
            uint64_t d_offs,
            uint64_t d_lens,
            fc_params_t *params,
            void *op,
            void **prep_req)
{
    uint32_t size;
    int32_t inputlen = 0, outputlen;
    struct cpt_ctx *cpt_ctx;
    uint8_t snow3g, iv_len = 16;
    struct cpt_request_info *req;
    buf_ptr_t *buf_p;
    uint32_t encr_offset;
    uint32_t encr_data_len;
    int flags;
    void *m_vaddr, *c_vaddr;
    uint64_t m_dma, c_dma;
    uint64_t *offset_vaddr, offset_dma;
    uint32_t *iv_s, iv[4], j;
    vq_cmd_word0_t vq_cmd_w0;
    opcode_info_t opcode;

    buf_p = &params->meta_buf;
    m_vaddr = buf_p->vaddr;
    m_dma = buf_p->dma_addr;

    /*
     * Microcode expects offsets in bytes
     * TODO: Rounding off
     */
    encr_offset = ENCR_OFFSET(d_offs) / 8;
    encr_data_len = ENCR_DLEN(d_lens);

    cpt_ctx = params->ctx_buf.vaddr;
    flags = cpt_ctx->zsk_flags;
    snow3g = cpt_ctx->snow3g;
    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Reserve memory for cpt request info */
    req = (struct cpt_request_info *)m_vaddr;

    size = sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

    /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

    opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
              (0 << 3) | (flags & 0x7));

    /* consider iv len */
    encr_offset += iv_len;

    inputlen = encr_offset +
        (RTE_ALIGN(encr_data_len, 8) / 8);
    outputlen = inputlen;

    iv_s = params->iv_buf;

    if (snow3g) {
        /*
         * DPDK provides the IV as IV3 IV2 IV1 IV0, big-endian;
         * the MC needs it as IV0 IV1 IV2 IV3.
         */
        for (j = 0; j < 4; j++)
            iv[j] = iv_s[3 - j];
    } else {
        /* ZUC doesn't need a swap */
        for (j = 0; j < 4; j++)
            iv[j] = iv_s[j];
    }

    /*
     * GP op header, lengths are expected in bits.
     */
    vq_cmd_w0.s.param1 = encr_data_len;

    /*
     * On 83XX the IV and the offset control word cannot be carried in
     * the instruction and must be part of the data buffer, so Direct
     * mode processing is done only when enough headroom is available.
     */
    if (likely((req_flags & SINGLE_BUF_INPLACE) &&
           (req_flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = params->bufs[0].vaddr;
        uint64_t dm_dma_addr = params->bufs[0].dma_addr;
        /*
         * These flags indicate that 24 bytes of headroom and 8 bytes
         * of tailroom are available, so the (limited) DIRECT mode
         * can be used.
         */

        offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
                        OFF_CTRL_LEN - iv_len);
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

        /* DPTR */
        req->ist.ei1 = offset_dma;
        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;
        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
                            + outputlen - iv_len);

        vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

        vq_cmd_w0.s.opcode = opcode.flags;

        if (likely(iv_len)) {
            uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
                              + OFF_CTRL_LEN);
            memcpy(iv_d, iv, 16);
        }

        /* iv offset is 0 */
        *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
    } else {
        uint32_t i, g_size_bytes, s_size_bytes;
        uint64_t dptr_dma, rptr_dma;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;
        uint32_t *iv_d;

        /* save space for offset and iv... */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;

        m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
        m_dma += OFF_CTRL_LEN + iv_len;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = opcode.flags;

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO: add an error check that the space is sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word */

        /* iv offset is 0 */
        *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

        i = fill_sg_comp(gather_comp, i, offset_dma,
                 OFF_CTRL_LEN + iv_len);

        iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
        memcpy(iv_d, iv, 16);

        /* Add input data */
        size = inputlen - iv_len;
        if (size) {
            i = fill_sg_comp_from_iov(gather_comp, i,
                          params->src_iov,
                          0, &size, NULL, 0);
            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* IV */
        i = fill_sg_comp(scatter_comp, i,
                 offset_dma + OFF_CTRL_LEN,
                 iv_len);

        /* Add output data */
        size = outputlen - iv_len;
        if (size) {
            i = fill_sg_comp_from_iov(scatter_comp, i,
                          params->dst_iov, 0,
                          &size, NULL, 0);
            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is the DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = size;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
            uint64_t d_offs,
            uint64_t d_lens,
            fc_params_t *params,
            void *op,
            void **prep_req)
{
    uint32_t size;
    int32_t inputlen = 0, outputlen = 0;
    struct cpt_ctx *cpt_ctx;
    uint32_t mac_len = 0;
    uint32_t i = 0;
    struct cpt_request_info *req;
    buf_ptr_t *buf_p;
    uint32_t encr_offset, auth_offset;
    uint32_t encr_data_len, auth_data_len;
    int flags;
    uint8_t *iv_s, *iv_d, iv_len = 8;
    uint8_t dir = 0;
    void *m_vaddr, *c_vaddr;
    uint64_t m_dma, c_dma;
    uint64_t *offset_vaddr, offset_dma;
    vq_cmd_word0_t vq_cmd_w0;
    opcode_info_t opcode;
    uint8_t *in_buffer;
    uint32_t g_size_bytes, s_size_bytes;
    uint64_t dptr_dma, rptr_dma;
    sg_comp_t *gather_comp;
    sg_comp_t *scatter_comp;

    buf_p = &params->meta_buf;
    m_vaddr = buf_p->vaddr;
    m_dma = buf_p->dma_addr;

    encr_offset = ENCR_OFFSET(d_offs) / 8;
    auth_offset = AUTH_OFFSET(d_offs) / 8;
    encr_data_len = ENCR_DLEN(d_lens);
    auth_data_len = AUTH_DLEN(d_lens);

    cpt_ctx = params->ctx_buf.vaddr;
    flags = cpt_ctx->zsk_flags;
    mac_len = cpt_ctx->mac_len;

    if (flags == 0x0)
        iv_s = params->iv_buf;
    else
        iv_s = params->auth_iv_buf;

    dir = iv_s[8] & 0x1;
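
    /*
     * Note: for KASUMI the direction bit is carried in the byte that
     * follows the 8-byte IV (iv_s[8]); it is folded into the minor
     * opcode below rather than being sent with the data.
     */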

    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Reserve memory for cpt request info */
    req = (struct cpt_request_info *)m_vaddr;

    size = sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

    /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
    opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
              (dir << 4) | (0 << 3) | (flags & 0x7));

    /*
     * GP op header, lengths are expected in bits.
     */
    vq_cmd_w0.s.param1 = encr_data_len;
    vq_cmd_w0.s.param2 = auth_data_len;
    vq_cmd_w0.s.opcode = opcode.flags;

    /* consider iv len */
    if (flags == 0x0) {
        encr_offset += iv_len;
        auth_offset += iv_len;
    }

    /* save space for offset ctrl and iv */
    offset_vaddr = m_vaddr;
    offset_dma = m_dma;

    m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
    m_dma += OFF_CTRL_LEN + iv_len;

    /* DPTR has SG list */
    in_buffer = m_vaddr;
    dptr_dma = m_dma;

    ((uint16_t *)in_buffer)[0] = 0;
    ((uint16_t *)in_buffer)[1] = 0;

    /* TODO: add an error check that the space is sufficient */
    gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

    /*
     * Input Gather List
     */
    i = 0;

    /* Offset control word followed by iv */

    if (flags == 0x0) {
        inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
        outputlen = inputlen;
        /* iv offset is 0 */
        *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
    } else {
        inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
        outputlen = mac_len;
        /* iv offset is 0 */
        *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
    }

    i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

    /* IV */
    iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
    memcpy(iv_d, iv_s, iv_len);

    /* input data */
    size = inputlen - iv_len;
    if (size) {
        i = fill_sg_comp_from_iov(gather_comp, i,
                      params->src_iov, 0,
                      &size, NULL, 0);
        if (unlikely(size)) {
            CPT_LOG_DP_ERR("Insufficient buffer space,"
                       " size %d needed", size);
            return;
        }
    }
    ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
    g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    /*
     * Output Scatter List
     */
    i = 0;
    scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

    /* IV in SLIST only for F8 */
    if (flags == 0x0) {
        i = fill_sg_comp(scatter_comp, i,
                 offset_dma + OFF_CTRL_LEN,
                 iv_len);
    }

    /* Add output data */
    if (req_flags & VALID_MAC_BUF) {
        size = outputlen - iv_len - mac_len;
        if (size) {
            i = fill_sg_comp_from_iov(scatter_comp, i,
                          params->dst_iov, 0,
                          &size, NULL, 0);
            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }

        /* mac data */
        if (mac_len) {
            i = fill_sg_comp_from_buf(scatter_comp, i,
                          &params->mac_buf);
        }
    } else {
        /* Output including mac */
        size = outputlen - iv_len;
        if (size) {
            i = fill_sg_comp_from_iov(scatter_comp, i,
                          params->dst_iov, 0,
                          &size, NULL, 0);
            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                           " size %d needed", size);
                return;
            }
        }
    }
    ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
    s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

    /* This is the DPTR len in case of SG mode */
    vq_cmd_w0.s.dlen = size;

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* cpt alternate completion address saved earlier */
    req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
    *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
    rptr_dma = c_dma - 8;

    req->ist.ei1 = dptr_dma;
    req->ist.ei2 = rptr_dma;

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
            uint64_t d_lens,
            fc_params_t *params,
            void *op,
            void **prep_req)
{
    uint32_t size;
    int32_t inputlen = 0, outputlen;
    struct cpt_ctx *cpt_ctx;
    uint8_t i = 0, iv_len = 8;
    struct cpt_request_info *req;
    buf_ptr_t *buf_p;
    uint32_t encr_offset;
    uint32_t encr_data_len;
    int flags;
    uint8_t dir = 0;
    void *m_vaddr, *c_vaddr;
    uint64_t m_dma, c_dma;
    uint64_t *offset_vaddr, offset_dma;
    vq_cmd_word0_t vq_cmd_w0;
    opcode_info_t opcode;
    uint8_t *in_buffer;
    uint32_t g_size_bytes, s_size_bytes;
    uint64_t dptr_dma, rptr_dma;
    sg_comp_t *gather_comp;
    sg_comp_t *scatter_comp;

    buf_p = &params->meta_buf;
    m_vaddr = buf_p->vaddr;
    m_dma = buf_p->dma_addr;

    encr_offset = ENCR_OFFSET(d_offs) / 8;
    encr_data_len = ENCR_DLEN(d_lens);

    cpt_ctx = params->ctx_buf.vaddr;
    flags = cpt_ctx->zsk_flags;
    /*
     * Reserve the space that follows the app data so that the completion
     * code and the alternate completion code land in the same cache line
     * as the app data.
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
        (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* Reserve memory for cpt request info */
    req = (struct cpt_request_info *)m_vaddr;

    size = sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

    /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
    opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
              (dir << 4) | (0 << 3) | (flags & 0x7));

    /*
     * GP op header, lengths are expected in bits.
     */
    vq_cmd_w0.s.param1 = encr_data_len;
    vq_cmd_w0.s.opcode = opcode.flags;

    /* consider iv len */
    encr_offset += iv_len;

    inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
    outputlen = inputlen;

    /* save space for offset ctrl & iv */
    offset_vaddr = m_vaddr;
    offset_dma = m_dma;

    m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
    m_dma += OFF_CTRL_LEN + iv_len;

    /* DPTR has SG list */
    in_buffer = m_vaddr;
    dptr_dma = m_dma;

    ((uint16_t *)in_buffer)[0] = 0;
    ((uint16_t *)in_buffer)[1] = 0;

    /* TODO: add an error check that the space is sufficient */
    gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

    /*
     * Input Gather List
     */
    i = 0;

    /* Offset control word followed by iv */
    *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

    i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

    /* IV */
    memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
           params->iv_buf, iv_len);

    /* Add input data */
    size = inputlen - iv_len;
    if (size) {
        i = fill_sg_comp_from_iov(gather_comp, i,
                      params->src_iov,
                      0, &size, NULL, 0);
        if (unlikely(size)) {
            CPT_LOG_DP_ERR("Insufficient buffer space,"
                       " size %d needed", size);
            return;
        }
    }
    ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
    g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    /*
     * Output Scatter List
     */
    i = 0;
    scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

    /* IV */
    i = fill_sg_comp(scatter_comp, i,
             offset_dma + OFF_CTRL_LEN,
             iv_len);

    /* Add output data */
    size = outputlen - iv_len;
    if (size) {
        i = fill_sg_comp_from_iov(scatter_comp, i,
                      params->dst_iov, 0,
                      &size, NULL, 0);
        if (unlikely(size)) {
            CPT_LOG_DP_ERR("Insufficient buffer space,"
                       " size %d needed", size);
            return;
        }
    }
    ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
    s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

    size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

    /* This is the DPTR len in case of SG mode */
    vq_cmd_w0.s.dlen = size;

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;

    /* cpt alternate completion address saved earlier */
    req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
    *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
    rptr_dma = c_dma - 8;

    req->ist.ei1 = dptr_dma;
    req->ist.ei2 = rptr_dma;

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;

    req->op = op;

    *prep_req = req;
    return;
}

static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
             uint64_t d_offs,
             uint64_t d_lens,
             fc_params_t *fc_params,
             void *op)
{
    struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
    uint8_t fc_type;
    void *prep_req = NULL;

    fc_type = ctx->fc_type;

    if (likely(fc_type == FC_GEN)) {
        cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
                  &prep_req);
    } else if (fc_type == ZUC_SNOW3G) {
        cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
                    &prep_req);
    } else if (fc_type == KASUMI) {
        cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
    }

    /*
     * For the AUTH_ONLY case the MC supports only digest generation;
     * verification must be done in software via memcmp().
     */

    return prep_req;
}

static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
             fc_params_t *fc_params, void *op)
{
    struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
    uint8_t fc_type;
    void *prep_req = NULL;

    fc_type = ctx->fc_type;

    /* Common API for the rest of the ops */
    if (likely(fc_type == FC_GEN)) {
        cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
                  &prep_req);
    } else if (fc_type == ZUC_SNOW3G) {
        cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
                    &prep_req);
    } else if (fc_type == KASUMI) {
        cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
                    &prep_req);
    } else if (fc_type == HASH_HMAC) {
        cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
    }

    return prep_req;
}

static __rte_always_inline int
cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
            const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
    mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
    mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
    mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

    if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
        uint32_t keyx[4];

        if (key_len != 16)
            return -1;
        /* No support for AEAD yet */
        if (cpt_ctx->enc_cipher)
            return -1;
        /* For ZUC/SNOW3G/Kasumi */
        switch (type) {
        case SNOW3G_UIA2:
            cpt_ctx->snow3g = 1;
            gen_key_snow3g(key, keyx);
            memcpy(zs_ctx->ci_key, keyx, key_len);
            cpt_ctx->fc_type = ZUC_SNOW3G;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case ZUC_EIA3:
            cpt_ctx->snow3g = 0;
            memcpy(zs_ctx->ci_key, key, key_len);
            memcpy(zs_ctx->zuc_const, zuc_d, 32);
            cpt_ctx->fc_type = ZUC_SNOW3G;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case KASUMI_F9_ECB:
            /* Kasumi ECB mode */
            cpt_ctx->k_ecb = 1;
            memcpy(k_ctx->ci_key, key, key_len);
            cpt_ctx->fc_type = KASUMI;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case KASUMI_F9_CBC:
            memcpy(k_ctx->ci_key, key, key_len);
            cpt_ctx->fc_type = KASUMI;
            cpt_ctx->zsk_flags = 0x1;
            break;
        default:
            return -1;
        }
        cpt_ctx->mac_len = 4;
        cpt_ctx->hash_type = type;
        return 0;
    }

    if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
        if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
            cpt_ctx->fc_type = HASH_HMAC;
    }

    if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
        return -1;

    /* For GMAC auth, cipher must be NULL */
    if (type == GMAC_TYPE)
        fctx->enc.enc_cipher = 0;

    fctx->enc.hash_type = cpt_ctx->hash_type = type;
    fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

    if (key_len) {
        cpt_ctx->hmac = 1;
        memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
        memcpy(cpt_ctx->auth_key, key, key_len);
        cpt_ctx->auth_key_len = key_len;
        memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
        memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
        memcpy(fctx->hmac.opad, key, key_len);
        fctx->enc.auth_input_type = 1;
    }

    return 0;
}
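
/*
 * Note on the HMAC path above: the raw key is staged in fctx->hmac.opad
 * and auth_input_type is set, which appears to tell the microcode to
 * derive the actual ipad/opad values from the raw key itself; the key is
 * also kept in cpt_ctx->auth_key so SG-mode requests can DMA it from
 * the context.
 */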

static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
           struct cpt_sess_misc *sess)
{
    struct rte_crypto_aead_xform *aead_form;
    cipher_type_t enc_type = 0; /* NULL Cipher type */
    auth_type_t auth_type = 0; /* NULL Auth type */
    uint32_t cipher_key_len = 0;
    uint8_t aes_gcm = 0;
    void *ctx = SESS_PRIV(sess);

    aead_form = &xform->aead;

    if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
        sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
        sess->cpt_op |= CPT_OP_AUTH_GENERATE;
    } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
        sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
        sess->cpt_op |= CPT_OP_AUTH_VERIFY;
    } else {
        CPT_LOG_DP_ERR("Unknown aead operation");
        return -1;
    }

    switch (aead_form->algo) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        enc_type = AES_GCM;
        cipher_key_len = 16;
        aes_gcm = 1;
        break;
    case RTE_CRYPTO_AEAD_AES_CCM:
        CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
                   aead_form->algo);
        return -1;
    case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
        enc_type = CHACHA20;
        auth_type = POLY1305;
        cipher_key_len = 32;
        sess->chacha_poly = 1;
        break;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
                   aead_form->algo);
        return -1;
    }

    if (aead_form->key.length < cipher_key_len) {
        CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
                   (unsigned long)aead_form->key.length);
        return -1;
    }

    sess->aes_gcm = aes_gcm;
    sess->mac_len = aead_form->digest_length;
    sess->iv_offset = aead_form->iv.offset;
    sess->iv_length = aead_form->iv.length;
    sess->aad_length = aead_form->aad_length;

    if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
            aead_form->key.length, NULL)))
        return -1;

    if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
            aead_form->digest_length)))
        return -1;

    return 0;
}

static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
         struct cpt_sess_misc *sess)
{
    struct rte_crypto_cipher_xform *c_form;
    cipher_type_t enc_type = 0; /* NULL Cipher type */
    uint32_t cipher_key_len = 0;
    uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

    c_form = &xform->cipher;

    if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
        sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
    else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
        sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
    else {
        CPT_LOG_DP_ERR("Unknown cipher operation");
        return -1;
    }

    switch (c_form->algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        enc_type = AES_CBC;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        enc_type = DES3_CBC;
        cipher_key_len = 24;
        break;
    case RTE_CRYPTO_CIPHER_DES_CBC:
        /* DES is implemented using 3DES in hardware */
        enc_type = DES3_CBC;
        cipher_key_len = 8;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        enc_type = AES_CTR;
        cipher_key_len = 16;
        aes_ctr = 1;
        break;
    case RTE_CRYPTO_CIPHER_NULL:
        enc_type = 0;
        is_null = 1;
        break;
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
        enc_type = KASUMI_F8_ECB;
        cipher_key_len = 16;
        zsk_flag = K_F8;
        break;
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
        enc_type = SNOW3G_UEA2;
        cipher_key_len = 16;
        zsk_flag = ZS_EA;
        break;
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        enc_type = ZUC_EEA3;
        cipher_key_len = 16;
        zsk_flag = ZS_EA;
        break;
    case RTE_CRYPTO_CIPHER_AES_XTS:
        enc_type = AES_XTS;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_ECB:
        enc_type = DES3_ECB;
        cipher_key_len = 24;
        break;
    case RTE_CRYPTO_CIPHER_AES_ECB:
        enc_type = AES_ECB;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CTR:
    case RTE_CRYPTO_CIPHER_AES_F8:
    case RTE_CRYPTO_CIPHER_ARC4:
        CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
                   c_form->algo);
        return -1;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
                   c_form->algo);
        return -1;
    }

    if (c_form->key.length < cipher_key_len) {
        CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
                   (unsigned long)c_form->key.length);
        return -1;
    }

    sess->zsk_flag = zsk_flag;
    sess->aes_gcm = 0;
    sess->aes_ctr = aes_ctr;
    sess->iv_offset = c_form->iv.offset;
    sess->iv_length = c_form->iv.length;
    sess->is_null = is_null;

    if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
            c_form->key.data, c_form->key.length, NULL)))
        return -1;

    return 0;
}
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}

	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
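
/*
 * AES-GMAC gets a dedicated session setup: the key is programmed
 * through the cipher path (AES_GCM) while GMAC_TYPE selects hash-only
 * processing, and is_gmac lets the datapath apply GCM-style salt
 * handling.
 */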
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}
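
/*
 * Allocate a meta buffer for a request. If the source mbuf is a single
 * segment with sufficient tailroom, the meta data is carved out of the
 * mbuf itself and bit 0 of the returned pointer is set so free_op_meta()
 * knows not to return it to the mempool; otherwise the buffer comes from
 * cpt_meta_pool.
 */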
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/*
 * free_op_meta - free meta buffer back to mempool.
 * @param mdata: pointer to the meta buffer.
 * @param cpt_meta_pool: meta buffer mempool.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
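
/*
 * Convert an mbuf chain into a scatter-gather iovec, skipping the first
 * start_offset bytes. Zero-length trailing segments are dropped.
 */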
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
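
/*
 * In-place variant: a single-segment mbuf is described directly in
 * param->bufs[0] and flagged SINGLE_BUF_INPLACE (and, when the 83XX
 * direct-mode head/tailroom requirements are met,
 * SINGLE_BUF_HEADTAILROOM); chained mbufs fall back to a scatter-gather
 * list in param->src_iov.
 */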
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* 1st seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
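
/*
 * Build the flexi-crypto request for a symmetric cipher/auth or AEAD
 * operation: resolve the IV, AAD and MAC buffers, pack the data offsets
 * and lengths into d_offs/d_lens (cipher in the upper half, auth in the
 * lower), prepare source/destination iovecs and call the encrypt or
 * decrypt prep routine to build the instruction.
 */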
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			/* AES-CTR with a 12B IV: build a 16B counter
			 * block with an initial block count of 1.
			 */
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p to generate "
						       "cipher text",
						       m_dst);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov,
						 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
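
/*
 * Compare the microcode-generated MAC against the expected digest and
 * set the op status accordingly (used on dequeue for auth-verify ops).
 */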
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
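
/*
 * KASUMI F9 input carries a direction bit followed by a '1' terminator
 * and zero padding at the end of the message. Scan backwards for the
 * terminator to recover the message length in bits and the direction
 * bit that immediately precedes it.
 */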
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint8_t last_byte;
	uint32_t pos;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}

/*
 * This handles all auth-only requests except AES-GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */