/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
/*
 * This file defines functions that are interfaces to the microcode spec.
 */
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
	int i, base;

	for (i = 0; i < 4; i++) {
		base = 4 * i;
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
	}
}
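/*
 * Illustrative sketch (hypothetical demo wrapper and CPT_UCODE_DEMO guard,
 * not part of the driver): gen_key_snow3g() loads the 16 B cipher key as
 * four 32-bit words in reversed word order, then stores each word
 * big-endian for the microcode.
 */
#ifdef CPT_UCODE_DEMO
static void
gen_key_snow3g_demo(void)
{
	const uint8_t ck[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};
	uint32_t keyx[4];

	gen_key_snow3g(ck, keyx);
	/* keyx[3] was built from ck[0..3] and keyx[0] from ck[12..15]. */
}
#endif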
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
	uint16_t mac_len = auth->digest_length;
	int ret;

	switch (auth->algo) {
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		ret = 0;
		break;
	default:
		ret = -1;
	}

	return ret;
}
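/*
 * Usage sketch (hypothetical, guarded out of the build): a 20 B digest
 * passes for SHA1/SHA1-HMAC, any other length returns -1.
 */
#ifdef CPT_UCODE_DEMO
static int
cpt_mac_len_verify_demo(void)
{
	struct rte_crypto_auth_xform auth = {
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_length = 20,
	};

	return cpt_mac_len_verify(&auth);	/* 0 on success */
}
#endif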
static __rte_always_inline void
cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
		   uint8_t *salt)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
{
	switch (key_len) {
	case 16:
	case 24:
	case 32:
		return 0;
	default:
		return -1;
	}
}
static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
	int fc_type = 0;

	switch (type) {
	case PASSTHROUGH:
	case DES3_CBC:
	case DES3_ECB:
	case CHACHA20:
		fc_type = FC_GEN;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
	case AES_GCM:
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case AES_XTS:
		key_len = key_len / 2;
		if (unlikely(key_len == 24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
			return -1;
		}
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case ZUC_EEA3:
	case SNOW3G_UEA2:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = ZUC_SNOW3G;
		break;
	case KASUMI_F8_CBC:
	case KASUMI_F8_ECB:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = KASUMI;
		break;
	default:
		return -1;
	}

	ctx->fc_type = fc_type;
	return 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
	cpt_ctx->enc_cipher = 0;
	fctx->enc.enc_cipher = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
{
	mc_aes_type_t aes_key_type = 0;

	switch (key_len) {
	case 16:
		aes_key_type = AES_128_BIT;
		break;
	case 24:
		aes_key_type = AES_192_BIT;
		break;
	case 32:
		aes_key_type = AES_256_BIT;
		break;
	default:
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
		return;
	}
	fctx->enc.aes_key = aes_key_type;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				uint16_t key_len)
{
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
	uint32_t keyx[4];

	cpt_ctx->snow3g = 1;
	gen_key_snow3g(key, keyx);
	memcpy(zs_ctx->ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
			     uint16_t key_len)
{
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;

	cpt_ctx->snow3g = 0;
	memcpy(zs_ctx->ci_key, key, key_len);
	memcpy(zs_ctx->zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				  uint16_t key_len)
{
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	cpt_ctx->k_ecb = 1;
	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				  uint16_t key_len)
{
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
		    const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	int ret;

	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
	if (unlikely(ret))
		return -1;

	if (cpt_ctx->fc_type == FC_GEN) {
		/*
		 * We need to always say IV is from DPTR as user can
		 * sometimes override IV per operation.
		 */
		fctx->enc.iv_source = CPT_FROM_DPTR;

		if (cpt_ctx->auth_key_len > 64)
			return -1;
	}

	switch (type) {
	case PASSTHROUGH:
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		goto success;
	case DES3_CBC:
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
		 */
		if (key_len == 8) {
			/* Skipping the first 8B as it will be copied
			 * in the regular code flow
			 */
			memcpy(fctx->enc.encr_key+key_len, key, key_len);
			memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
		}
		break;
	case DES3_ECB:
		/* For DES3_ECB IV need to be from CTX. */
		fctx->enc.iv_source = CPT_FROM_CTX;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
	case CHACHA20:
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_GCM:
		/* Even though iv source is from dptr,
		 * aes_gcm salt is taken from ctx
		 */
		if (salt) {
			memcpy(fctx->enc.encr_iv, salt, 4);
			/* Assume it is just a salt update
			 * and nothing else
			 */
			if (!key)
				goto success;
		}
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_XTS:
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		break;
	case SNOW3G_UEA2:
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		goto success;
	case ZUC_EEA3:
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_ECB:
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_CBC:
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
		goto success;
	default:
		return -1;
	}

	/* Only for FC_GEN case */

	/* For GMAC auth, cipher must be NULL */
	if (cpt_ctx->hash_type != GMAC_TYPE)
		fctx->enc.enc_cipher = type;

	memcpy(fctx->enc.encr_key, key, key_len);

success:
	cpt_ctx->enc_cipher = type;

	return 0;
}
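/*
 * Usage sketch (hypothetical demo, never called by the driver): program a
 * 16 B AES-CBC key. salt is only consumed by the AES_GCM case above.
 */
#ifdef CPT_UCODE_DEMO
static int
cpt_fc_ciph_set_key_demo(struct cpt_ctx *ctx)
{
	static const uint8_t key[16] = { 0 };

	return cpt_fc_ciph_set_key(ctx, AES_CBC, key, sizeof(key), NULL);
}
#endif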
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     uint32_t i,
	     phys_addr_t dma_addr,
	     uint32_t size)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(size);
	to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
	i++;
	return i;
}
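/*
 * Layout note (derived from the i>>2 / i%4 indexing above): one sg_comp_t
 * packs four {len, ptr} pairs, both stored big-endian, so logical entry i
 * lives in component i/4, slot i%4. Five entries therefore span two
 * components, with the fifth landing in list[1].
 */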
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
		      uint32_t i,
		      buf_ptr_t *from)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
	to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
	i++;
	return i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
			  uint32_t i,
			  buf_ptr_t *from,
			  uint32_t *psize)
{
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
	*psize -= e_len;
	i++;
	return i;
}
/*
 * This fills the MC expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      uint32_t i,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	buf_ptr_t *bufs;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		uint32_t e_len;
		sg_comp_t *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			from_offset = 0;
		} else {
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?
				bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

			if (!extra_offset) {
				i--;
			} else {
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] =
					rte_cpu_to_be_64(extra_buf->dma_addr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
				size -= next_len;
			}
			extra_len = 0;
		} else {
			size -= e_len;
		}
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    uint64_t d_lens,
		    digest_params_t *params,
		    void *op,
		    void **prep_req)
{
	struct cpt_request_info *req;
	uint32_t size, i;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	buf_ptr_t *meta_p;
	struct cpt_ctx *ctx;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;

	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;

	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);

	/* GP op header */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.minor = 0;
	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
	if (ctx->hmac) {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
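		/* e.g. a 20 B HMAC key is padded to 24 B, so dlen grows by 24 */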
	} else {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;
	}

	/* Null auth only case enters the if */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		vq_cmd_w0.s.opcode.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;
	}

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input gather list
	 */
	i = 0;

	if (ctx->hmac) {
		uint64_t k_dma = params->ctx_buf.dma_addr +
			offsetof(struct cpt_ctx, auth_key);
		/* Key */
		i = fill_sg_comp(gather_comp, i, k_dma,
				 RTE_ALIGN_CEIL(key_len, 8));
	}

	/* input data */
	size = data_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
					  0, &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
					 " by %dB", size);
			return;
		}
	} else {
		/*
		 * Looks like we need to support zero data
		 * gather ptr in case of hash & hmac
		 */
		i++;
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
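	/*
	 * SG header sketch (as implied by the stores above, not a public
	 * struct): DPTR starts with four big-endian 16-bit words; words 0-1
	 * are written as zero, word 2 counts gather components, word 3
	 * counts scatter components. The gather list follows at byte 8,
	 * then the scatter list at byte 8 + g_size_bytes.
	 */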
	/*
	 * Output Scatter list
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");
			return;
		}

		size = mac_len;
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
	} else {
		size = mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
				       " %dB", size);
			return;
		}
	}

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr;
	uint64_t c_dma;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = 0;

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
					encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
					encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	/* GP op header */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * In 83XX since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
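		/*
		 * Offset control word sketch (from the expression above):
		 * the 64-bit word packs auth_offset in bits 0-7, iv_offset
		 * in bits 8-15 and encr_offset from bit 16 up, and is stored
		 * big-endian so the microcode reads it MSB-first.
		 */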
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input gather list
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				i = fill_sg_comp_from_iov(gather_comp, i,
							  fc_params->src_iov,
							  0, &size,
							  aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr;
	uint64_t c_dma;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;
	/* Decryption */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = 1;

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	/* GP op header */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * In 83XX since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
					outputlen - iv_len);
		/* since this is decryption,
		 * don't touch the content of
		 * alternate ccode space as it contains
		 * the mac.
		 */

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
	} else {
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input gather list
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					i = fill_sg_comp_from_iov(gather_comp,
							i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						CPT_LOG_DP_ERR("Bad input args");
						return;
					}

					i = fill_sg_comp_from_iov(
							gather_comp, i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					CPT_LOG_DP_ERR("Bad input args");
					return;
				}

				i = fill_sg_comp_from_iov(scatter_comp, i,
							  fc_params->dst_iov, 0,
							  &size, aad_buf,
							  aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint8_t snow3g;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));
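	/*
	 * Minor opcode bit map (as composed above): bit 7 selects the
	 * context from CPTR, bit 5 SNOW3G (1) vs ZUC (0), bit 4 key from
	 * DPTR (0 here), bit 3 IV from DPTR (0 here), and bits 0-2 carry
	 * zsk_flags (cipher vs auth variant).
	 */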
	if (flags == 0x1) {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = AUTH_DLEN(d_lens);

		/* EIA3 or UIA2 */
		auth_offset = AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	} else {
		/* EEA3 or UEA2 */
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ENCR_DLEN(d_lens);

		encr_offset = ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	}

	/* IV */
	iv_s = (flags == 0x1) ? params->auth_iv_buf :
		params->iv_buf;

	if (snow3g) {
		/*
		 * DPDK seems to provide it in the form of IV3 IV2 IV1 IV0
		 * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
		 */
		uint32_t j;

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		uint32_t j;

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * In 83XX since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		*offset_vaddr = offset_ctrl;
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
		if (!flags) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);
		}

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* IV */
	iv_s = params->iv_buf;
	if (snow3g) {
		/*
		 * DPDK seems to provide it in the form of IV3 IV2 IV1 IV0
		 * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
		 */

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;

	/*
	 * In 83XX since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint32_t i = 0;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));
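	/*
	 * Minor opcode bit map (as composed above): bit 6 selects the
	 * context from CPTR, bit 5 ECB (1) vs CBC (0) from k_ecb, bit 4 the
	 * direction bit taken from the IV, bit 3 IV from DPTR (0 here), and
	 * bits 0-2 carry zsk_flags.
	 */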
	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
	}

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
	if (flags)
		iv_len = 0;

	if (iv_len) {
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;
	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov,
					  0, &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,
			 iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     uint64_t d_offs,
		     uint64_t d_lens,
		     fc_params_t *fc_params,
		     void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
	}

	/*
	 * For AUTH_ONLY case,
	 * MC only supports digest generation and verification
	 * should be done in software by memcmp()
	 */

	return prep_req;
}
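/*
 * Sketch of the software verification step described above (hypothetical
 * helper; real callers compare against the digest carried in the op):
 */
#ifdef CPT_UCODE_DEMO
static int
cpt_sw_verify_mac_demo(const uint8_t *gen_mac, const uint8_t *exp_mac,
		       uint16_t mac_len)
{
	return memcmp(gen_mac, exp_mac, mac_len) ? -1 : 0;
}
#endif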
static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	/* Common api for rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
				    &prep_req);
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
	}

	return prep_req;
}
static __rte_always_inline int
cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
		    const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		uint32_t keyx[4];

		if (key_len != 16)
			return -1;
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
			return -1;
		/* For ZUC/SNOW3G/Kasumi */
		switch (type) {
		case SNOW3G_UIA2:
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(zs_ctx->ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case ZUC_EIA3:
			cpt_ctx->snow3g = 0;
			memcpy(zs_ctx->ci_key, key, key_len);
			memcpy(zs_ctx->zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_ECB:
			/* Kasumi ECB mode */
			cpt_ctx->k_ecb = 1;
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_CBC:
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		default:
			return -1;
		}
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
		return 0;
	}

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	}

	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
		return -1;

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		fctx->enc.enc_cipher = 0;

	fctx->enc.hash_type = cpt_ctx->hash_type = type;
	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

	if (key_len) {
		cpt_ctx->hmac = 1;
		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
		memcpy(fctx->hmac.ipad, key, key_len);
		memcpy(fctx->hmac.opad, key, key_len);
		fctx->enc.auth_input_type = 1;
	}
	return 0;
}
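/*
 * Usage sketch (hypothetical demo): program a 20 B SHA1-HMAC key with a
 * 20 B digest; for ZUC/SNOW3G/KASUMI types the 16 B key path above runs
 * instead.
 */
#ifdef CPT_UCODE_DEMO
static int
cpt_fc_auth_set_key_demo(struct cpt_ctx *ctx)
{
	static const uint8_t key[20] = { 0 };

	return cpt_fc_auth_set_key(ctx, SHA1_TYPE, key, sizeof(key), 20);
}
#endif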
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	aead_form = &xform->aead;
	void *ctx = SESS_PRIV(sess);

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	} else {
		CPT_LOG_DP_ERR("Unknown aead operation");
		return -1;
	}
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       aead_form->algo);
		return -1;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = CHACHA20;
		auth_type = POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       aead_form->algo);
		return -1;
	}
	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
		return -1;
	}
	sess->zsk_flag = 0;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			aead_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			aead_form->digest_length)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
	else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
			c_form->key.data, c_form->key.length, NULL)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
			a_form->key.data, a_form->key.length,
			a_form->digest_length)))
		return -1;

	return 0;
}

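/*
 * fill_sess_gmac - populate session parameters for AES-GMAC.
 * GMAC is handled through the GCM cipher context with a GMAC auth type,
 * which is why the auth key is loaded via cpt_fc_ciph_set_key() here.
 */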
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}

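/*
 * alloc_op_meta - allocate a meta buffer for a request.
 * Fast path: when the source mbuf is a single segment with enough tailroom,
 * the meta data is carved out of the end of the mbuf's buffer and the
 * returned pointer is tagged with bit 0 set so free_op_meta() knows not to
 * return it to the mempool. Otherwise the buffer comes from cpt_meta_pool.
 */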
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/*
 * free_op_meta - free meta buffer back to mempool.
 * @param mdata: pointer to the meta buffer (bit 0 set if mbuf-allocated).
 * @param cpt_meta_pool: mempool the buffer was taken from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}

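/*
 * prepare_iov_from_pkt - flatten an mbuf chain into an I/O vector.
 * Walks the segments starting at start_offset and records each segment's
 * virtual address, IOVA and size. Returns non-zero if the offset leaves no
 * data in the starting segment.
 */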
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

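/*
 * prepare_iov_from_pkt_inplace - build I/O vectors for in-place processing.
 * A single-segment mbuf is passed as a direct buffer; when it also has
 * sufficient head/tailroom the SINGLE_BUF_HEADTAILROOM flag is set so the
 * request can use direct mode. Chained mbufs fall back to the SG list.
 */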
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
		    (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

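/*
 * fill_fc_params - build a flexi-crypto request from a crypto op.
 * Packs cipher and auth offsets/lengths into d_offs/d_lens (cipher in the
 * upper half-words, auth in the lower), gathers IV/AAD/MAC buffers and the
 * session context, then calls the enc/dec prep routine. For AEAD, when the
 * AAD sits contiguously in front of the data in the mbuf it is folded into
 * the data range instead of being passed as a separate buffer.
 */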
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
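	/*
	 * Decide between in-place and scatter-gather processing: the
	 * bit-level wireless modes and separate AAD/MAC buffers forced
	 * inplace off above; NULL cipher and decode (auth verify) ops
	 * are forced onto the SG path below.
	 */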
	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}
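	/*
	 * The first four 64-bit words of the meta buffer carry request
	 * state: op[0] holds the (possibly tagged) meta pointer, op[1]
	 * the crypto op, and op[2]/op[3] are zeroed here and only used
	 * by the auth-verify completion path. The working meta area
	 * starts after these words.
	 */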
	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

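/*
 * compl_auth_verify - finalize an auth-verify op at completion time.
 * Compares the microcode-generated MAC against the expected digest (either
 * the separate digest buffer or the bytes following the auth range in the
 * source mbuf) and sets the op status accordingly.
 */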
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}

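/*
 * KASUMI F9 (UIA1) input ends with the direction bit followed by a single
 * '1' stop bit and zero padding up to the block boundary. Scanning backwards
 * for the first non-zero byte locates the stop bit; the bit immediately
 * above it is the direction, and everything before it is the message whose
 * length in bits is reported back to the caller.
 */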
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}

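/*
 * AES-GMAC is excluded below because it is processed through the GCM
 * cipher context and therefore takes the fill_fc_params() path (see the
 * is_gmac handling there).
 */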
/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;
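	/*
	 * MAC placement: on generate, write the digest either to the
	 * user-supplied buffer or just past the auth range in the
	 * destination mbuf (extending it if needed). On verify, stage
	 * the computed MAC in the meta buffer and record it in
	 * op[2]/op[3] so compl_auth_verify() can compare it at
	 * completion.
	 */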
	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */