/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
 * This file defines functions that are interfaces to the microcode spec.
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC

static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)

	for (i = 0; i < 4; i++) {
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
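
	/*
	 * Illustration (hypothetical key bytes): each keyx[3 - i] holds the
	 * big-endian form of ck[4i..4i+3], so the word order ends up
	 * reversed: keyx[3] is built from ck[0..3] and keyx[0] from
	 * ck[12..15], which is the layout the SNOW 3G microcode expects.
	 */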
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)

	uint16_t mac_len = auth->digest_length;

	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
	case RTE_CRYPTO_AUTH_NULL:
static __rte_always_inline void
cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,

	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	memcpy(fctx->enc.encr_iv, salt, 4);

static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)

static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)

		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		key_len = key_len / 2;
		if (unlikely(key_len == 24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
		fc_type = ZUC_SNOW3G;
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
	ctx->fc_type = fc_type;

static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)

	cpt_ctx->enc_cipher = 0;
	fctx->enc.enc_cipher = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)

	mc_aes_type_t aes_key_type = 0;
		aes_key_type = AES_128_BIT;
		aes_key_type = AES_192_BIT;
		aes_key_type = AES_256_BIT;
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
	fctx->enc.aes_key = aes_key_type;

static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,

	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;

	gen_key_snow3g(key, keyx);
	memcpy(zs_ctx->ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,

	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;

	memcpy(zs_ctx->ci_key, key, key_len);
	memcpy(zs_ctx->zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,

	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,

	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline int
cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
		    const uint8_t *key, uint16_t key_len, uint8_t *salt)

	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;

	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);

	if (cpt_ctx->fc_type == FC_GEN) {
		 * We always need to say the IV is from DPTR, as the user can
		 * override the IV per operation.
		fctx->enc.iv_source = CPT_FROM_DPTR;
		if (cpt_ctx->auth_key_len > 64)

		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);

		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
		/* Skipping the first 8B, as it will be copied
		 * in the regular code flow
		 */
		memcpy(fctx->enc.encr_key + key_len, key, key_len);
		memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);

		/* For DES3_ECB the IV needs to come from CTX. */
		fctx->enc.iv_source = CPT_FROM_CTX;

		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Even though the IV source is DPTR,
		 * the AES-GCM salt is taken from CTX.
			memcpy(fctx->enc.encr_iv, salt, 4);
			/* Assuming it was just a salt update
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);

		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);

	/* Only for the FC_GEN case */

	/* For GMAC auth, cipher must be NULL */
	if (cpt_ctx->hash_type != GMAC_TYPE)
		fctx->enc.enc_cipher = type;

	memcpy(fctx->enc.encr_key, key, key_len);

	cpt_ctx->enc_cipher = type;
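
/*
 * Minimal usage sketch (illustration only, not part of the driver flow;
 * the 16B key buffer is a hypothetical input): program an AES-128-CBC
 * key into a context. A 16B key makes the helper above select
 * AES_128_BIT; the salt argument is only consulted for AES-GCM, hence
 * NULL here.
 */
static __rte_always_inline int
example_set_aes_cbc_key(struct cpt_ctx *cpt_ctx, const uint8_t key[16])
{
	return cpt_fc_ciph_set_key(cpt_ctx, AES_CBC, key, 16, NULL);
}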
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     phys_addr_t dma_addr,

	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
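
/*
 * Packing sketch (hypothetical DMA addresses; assumes, as the callers
 * below do, that fill_sg_comp() returns the next free index): entries
 * are stored four to an sg_comp_t, so entry i lands in component
 * i >> 2, slot i % 4. Both entries here therefore share list[0].
 */
static __rte_always_inline uint32_t
example_two_entry_gather(sg_comp_t *list)
{
	uint32_t i = 0;

	i = fill_sg_comp(list, i, 0x1000, 64);	/* list[0], slot 0 */
	i = fill_sg_comp(list, i, 0x2000, 128);	/* list[0], slot 1 */
	return i;
}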
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,

	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);

static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,

	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
 * This fills the SGIO list expected by the MC
 * from the IOV given by the user.
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)

	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;

	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		sg_comp_t *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at the given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

				e_len = extra_offset;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);

			extra_len = RTE_MIN(extra_len, size);
			/* Insert the extra data ptr */
				rte_cpu_to_be_16(extra_len);
				rte_cpu_to_be_64(extra_buf->dma_addr);

			next_len = RTE_MIN(next_len, size);
			/* Insert the rest of the data */
			to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
			to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);

			extra_offset -= size;
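
/*
 * Layout note for the prep routines below: the SG input buffer they
 * build starts with an 8-byte header whose 16-bit words [0] and [1]
 * are zeroed and whose words [2] and [3] carry the big-endian gather
 * and scatter component counts; the gather list follows the header and
 * the scatter list follows the gather list.
 */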
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    digest_params_t *params,

	struct cpt_request_info *req;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;

	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;

	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);

	vq_cmd_w0.s.opcode.minor = 0;
	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;
	/* Only the NULL-auth case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		vq_cmd_w0.s.opcode.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;

	/* DPTR has SG list */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		uint64_t k_dma = params->ctx_buf.dma_addr +
				 offsetof(struct cpt_ctx, auth_key);
		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));

		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
		if (unlikely(size)) {
			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"

		 * Looks like we need to support zero data
		 * gather ptr in case of hash & hmac

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");

		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,

	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		 * We don't support both AAD
		 * and auth data separately.
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;

	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an offset,
		 * this is a special case, as we need to fragment the input data
		 * into passthrough + encr_data and then insert the AAD in between.
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = 0;

	if (hash_type == GMAC_TYPE) {

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
		outputlen = enc_dlen + mac_len;

	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be part of the data
	 * buffer, so we check for head room and only then do the direct
	 * mode processing.
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		 * This flag indicates that 24 bytes of head room and
		 * 8 bytes of tail room are available, so we get to do
		 * DIRECT MODE within that limitation.
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
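
		/*
		 * Worked example (hypothetical offsets): encr_offset = 32,
		 * iv_offset = 16 and auth_offset = 8 give
		 * (32 << 16) | (16 << 8) | 8 = 0x201008, i.e. the 64-bit
		 * control word 0x0000000000201008 prior to the big-endian
		 * conversion.
		 */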
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;

		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has SG list */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		size = inputlen - iv_len;
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
				i = fill_sg_comp_from_iov(gather_comp, i,
							  aad_buf, aad_offset);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		 * Output Scatter list
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,

		/* output data or output data + digest */
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &fc_params->mac_buf);

			/* Output including mac */
			size = outputlen - iv_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,

	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		 * We don't support both AAD
		 * and auth data separately.
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an offset,
		 * this is a special case, as we need to fragment the input data
		 * into passthrough + encr_data and then insert the AAD in between.
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = 1;

	if (hash_type == GMAC_TYPE) {

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;

	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be part of the data
	 * buffer, so we check for head room and only then do the direct
	 * mode processing.
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		 * This flag indicates that 24 bytes of head room and
		 * 8 bytes of tail room are available, so we get to do
		 * DIRECT MODE within that limitation.
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* since this is decryption,
		 * don't touch the content of
		 * alternate ccode space as it contains

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;

		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;

			/* input data only */
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				i = fill_sg_comp_from_iov(gather_comp,

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer"
					       " space, size %d needed",

			i = fill_sg_comp_from_buf(gather_comp, i,
						  &fc_params->mac_buf);

			/* input data + mac */
			size = inputlen - iv_len;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (unlikely(!fc_params->src_iov)) {
					CPT_LOG_DP_ERR("Bad input args");

				i = fill_sg_comp_from_iov(

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer"
					       " space, size %d needed",

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

		/* Add output data */
		size = outputlen - iv_len;

		if (unlikely(flags & SINGLE_BUF_INPLACE)) {
			/* handle single buffer here */
			i = fill_sg_comp_from_buf_min(scatter_comp, i,
			uint32_t aad_offset = aad_len ?
				passthrough_len : 0;

			if (unlikely(!fc_params->dst_iov)) {
				CPT_LOG_DP_ERR("Bad input args");

			i = fill_sg_comp_from_iov(scatter_comp, i,
						  fc_params->dst_iov, 0,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			fc_params_t *params,

	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));
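
	/*
	 * E.g. (hypothetical values): a SNOW 3G cipher op (snow3g = 1,
	 * zsk_flags = 0) yields minor = 0x80 | 0x20 = 0xA0, while the
	 * corresponding auth op (zsk_flags = 0x1) yields 0xA1.
	 */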
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	auth_data_len = AUTH_DLEN(d_lens);

	auth_offset = AUTH_OFFSET(d_offs);
	auth_offset = auth_offset / 8;

	/* consider iv len */
	auth_offset += iv_len;

	inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
	outputlen = mac_len;

	offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	encr_data_len = ENCR_DLEN(d_lens);

	encr_offset = ENCR_OFFSET(d_offs);
	encr_offset = encr_offset / 8;
	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* iv offset is 0 */
	offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	iv_s = (flags == 0x1) ? params->auth_iv_buf :
	 * DPDK seems to provide it as IV3 IV2 IV1 IV0 in big-endian
	 * form; the MC needs it as IV0 IV1 IV2 IV3.
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];

		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)

	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be part of the data
	 * buffer, so we check for head room and only then do the direct
	 * mode processing.
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		 * This flag indicates that 24 bytes of head room and
		 * 8 bytes of tail room are available, so we get to do
		 * DIRECT MODE within that limitation.
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);

		*offset_vaddr = offset_ctrl;

		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		size = inputlen - iv_len;

		i = fill_sg_comp_from_iov(gather_comp, i,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

			i = fill_sg_comp_from_buf(scatter_comp, i,

			/* Output including mac */
			size = outputlen - iv_len;

			i = fill_sg_comp_from_iov(scatter_comp, i,

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			fc_params_t *params,

	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset +
		   (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	iv_s = params->iv_buf;
	 * DPDK seems to provide it as IV3 IV2 IV1 IV0 in big-endian
	 * form; the MC needs it as IV0 IV1 IV2 IV3.
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];

		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)

	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = encr_data_len;
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be part of the data
	 * buffer, so we check for head room and only then do the direct
	 * mode processing.
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		 * This flag indicates that 24 bytes of head room and
		 * 8 bytes of tail room are available, so we get to do
		 * DIRECT MODE within that limitation.
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;

		i = fill_sg_comp_from_iov(gather_comp, i,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

		/* Add output data */
		size = outputlen - iv_len;

		i = fill_sg_comp_from_iov(scatter_comp, i,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    fc_params_t *params,

	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

	iv_s = params->iv_buf;
	iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));

	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/* consider iv len */
		encr_offset += iv_len;
		auth_offset += iv_len;

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	size = inputlen - iv_len;

	i = fill_sg_comp_from_iov(gather_comp, i,

	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	 * Output Scatter List
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;

		i = fill_sg_comp_from_iov(scatter_comp, i,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

		i = fill_sg_comp_from_buf(scatter_comp, i,

		/* Output including mac */
		size = outputlen - iv_len;

		i = fill_sg_comp_from_iov(scatter_comp, i,

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    fc_params_t *params,

	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	 * Use the initial space that follows the app data for the
	 * completion code & alternate completion code, so that they
	 * fall in the same cache line as the app data.
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));

	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;

	i = fill_sg_comp_from_iov(gather_comp, i,

	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	 * Output Scatter List
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	size = outputlen - iv_len;

	i = fill_sg_comp_from_iov(scatter_comp, i,

	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     fc_params_t *fc_params,

	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
	 * For the AUTH_ONLY case,
	 * the MC supports only digest generation; verification
	 * should be done in software using memcmp().
	 */
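
/*
 * Hedged sketch of that software verification step (gen_mac/exp_mac are
 * hypothetical buffers holding the generated and expected digests;
 * memcmp comes in via the common headers, as memcpy is used above):
 */
static __rte_always_inline int
example_verify_mac(const uint8_t *gen_mac, const uint8_t *exp_mac,
		   uint16_t mac_len)
{
	return memcmp(gen_mac, exp_mac, mac_len) ? -1 : 0;
}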
static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)

	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;
	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);

static __rte_always_inline int
cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
		    const uint8_t *key, uint16_t key_len, uint16_t mac_len)

	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
		/* For ZUC/SNOW3G/Kasumi */
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(zs_ctx->ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			cpt_ctx->snow3g = 0;
			memcpy(zs_ctx->ci_key, key, key_len);
			memcpy(zs_ctx->zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			/* Kasumi ECB mode */
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;

		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;

	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		fctx->enc.enc_cipher = 0;

	fctx->enc.hash_type = cpt_ctx->hash_type = type;
	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

	memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
	memcpy(cpt_ctx->auth_key, key, key_len);
	cpt_ctx->auth_key_len = key_len;
	memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
	memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));

	memcpy(fctx->hmac.opad, key, key_len);
	fctx->enc.auth_input_type = 1;
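
/*
 * Minimal usage sketch (illustration only, not part of the driver flow;
 * key/key_len are hypothetical inputs): program an HMAC-SHA1 auth key.
 * The 20B digest length matches what cpt_mac_len_verify() accepts for
 * SHA1.
 */
static __rte_always_inline int
example_set_sha1_hmac_key(struct cpt_ctx *cpt_ctx, const uint8_t *key,
			  uint16_t key_len)
{
	return cpt_fc_auth_set_key(cpt_ctx, SHA1_TYPE, key, key_len, 20);
}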
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)

	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	void *ctx = SESS_PRIV(sess);

	aead_form = &xform->aead;
	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2514 CPT_LOG_DP_ERR("Unknown aead operation\n");
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		cipher_key_len = 16;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = CHACHA20;
		auth_type = POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",

	if (aead_form->key.length < cipher_key_len) {
2539 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2540 (unsigned int long)aead_form->key.length);
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
					 aead_form->key.length, NULL)))

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
					 aead_form->digest_length)))
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)

	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2577 CPT_LOG_DP_ERR("Unknown cipher operation\n");
	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);

	sess->zsk_flag = zsk_flag;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
					 c_form->key.data, c_form->key.length, NULL)))
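
/*
 * Minimal usage sketch (illustration only; the zeroed session, the
 * 16B key and the IV offset right after the rte_crypto_op are all
 * hypothetical assumptions): build a cipher-only xform for AES-128-CBC
 * encryption and hand it to fill_sess_cipher().
 */
static __rte_always_inline int
example_cipher_session(struct cpt_sess_misc *sess, const uint8_t key[16])
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform.cipher.key.data = (uint8_t *)(uintptr_t)key;
	xform.cipher.key.length = 16;
	xform.cipher.iv.offset = sizeof(struct rte_crypto_op);
	xform.cipher.iv.length = 16;

	return fill_sess_cipher(&xform, sess);
}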
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
			a_form->key.data, a_form->key.length,
			a_form->digest_length)))
		return -1;

	return 0;
}

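/*
 * AES-GMAC arrives from the API as an auth xform but also needs the
 * GCM cipher key programmed, hence this dedicated helper rather than
 * fill_sess_auth().
 */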
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}

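/*
 * A PMD session-configure path would typically dispatch on the xform
 * chain to one of the fill_sess_* helpers above; a minimal sketch,
 * assuming a single xform (this dispatch is not part of this file):
 *
 *	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
 *		ret = fill_sess_aead(xform, sess);
 *	else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 *		ret = fill_sess_cipher(xform, sess);
 *	else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 *		if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
 *			ret = fill_sess_gmac(xform, sess);
 *		else
 *			ret = fill_sess_auth(xform, sess);
 *	}
 */

/*
 * Allocate meta data for an operation. Fast path: a single-segment
 * source mbuf with sufficient tailroom hosts the meta area in its own
 * tail, and the returned pointer is tagged in its LSB so that
 * free_op_meta() knows not to return it to the mempool; otherwise the
 * buffer comes from cpt_meta_pool.
 */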
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/**
 * free_op_meta - return an op meta buffer to its mempool.
 * @param mdata: pointer to the meta buffer (possibly LSB-tagged).
 * @param cpt_meta_pool: pointer to the meta buffer mempool.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}

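/*
 * Flatten an mbuf chain into the iovec gather list consumed by the
 * microcode; the first start_offset bytes of the chain are skipped.
 */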
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

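/*
 * In-place variant: a single-segment mbuf can be handed to the
 * hardware directly (SINGLE_BUF_INPLACE), and if it also has enough
 * head/tailroom it qualifies for direct mode
 * (SINGLE_BUF_HEADTAILROOM); multi-segment chains fall back to a
 * gather list built in param->src_iov.
 */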
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

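/*
 * Build the flexi-crypto request for a cipher/auth/AEAD operation:
 * gather the IVs, pack the data offsets and lengths, set up in-place
 * or scatter-gather I/O, allocate per-op meta data, and prepare the
 * microcode instruction.
 */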
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;
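
	/*
	 * The microcode takes packed descriptors: d_offs carries the
	 * cipher offset in bits 16-31 and the auth offset in bits 0-15,
	 * while d_lens carries the cipher length in its upper 32 bits
	 * and the auth length in its lower 32 bits. For AEAD with
	 * contiguous AAD, the auth range is widened to cover the AAD.
	 */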
	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);
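
	/*
	 * The first four 64-bit words of the meta buffer carry per-op
	 * bookkeeping (meta pointer, crypto op pointer and the
	 * auth-verify MAC address/length); the microcode gets the
	 * remainder.
	 */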
	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

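/*
 * Completion-side MAC check: compare the MAC generated by hardware
 * against the expected MAC, which is either the op's digest pointer or
 * sits immediately after the auth range in m_src.
 */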
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}

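/*
 * In KASUMI F9 the message is followed by the direction bit, a single
 * '1' padding bit and then zeros up to the block boundary. Scanning
 * backwards for the last set bit therefore recovers both the real
 * message length in bits and the direction bit that precedes the
 * padding.
 */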
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	int last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}

/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}
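
	/*
	 * Hand the microcode the meta buffer past the bookkeeping words
	 * (and, on verify, past the MAC scratch area reserved above so
	 * the completion path can compare it via op[2]/op[3]).
	 */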
	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */