1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines functions that are interfaces to the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
25 static __rte_always_inline void
26 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
30 for (i = 0; i < 4; i++) {
32 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
33 (ck[base + 2] << 8) | (ck[base + 3]);
34 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
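/*
 * Illustrative note (not part of the driver): gen_key_snow3g() packs each
 * 4-byte group of the confidentiality key into a 32-bit word in reversed
 * word order and converts it to big endian, so a hypothetical caller:
 *
 *	uint32_t keyx[4];
 *	const uint8_t ck[16] = { 0 };	// 16B SNOW3G CK (placeholder)
 *
 *	gen_key_snow3g(ck, keyx);
 *	// keyx[3] now holds CK bytes 0..3, keyx[0] holds CK bytes 12..15
 */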
38 static __rte_always_inline int
39 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
41 uint16_t mac_len = auth->digest_length;
45 case RTE_CRYPTO_AUTH_MD5:
46 case RTE_CRYPTO_AUTH_MD5_HMAC:
47 ret = (mac_len == 16) ? 0 : -1;
49 case RTE_CRYPTO_AUTH_SHA1:
50 case RTE_CRYPTO_AUTH_SHA1_HMAC:
51 ret = (mac_len == 20) ? 0 : -1;
53 case RTE_CRYPTO_AUTH_SHA224:
54 case RTE_CRYPTO_AUTH_SHA224_HMAC:
55 ret = (mac_len == 28) ? 0 : -1;
57 case RTE_CRYPTO_AUTH_SHA256:
58 case RTE_CRYPTO_AUTH_SHA256_HMAC:
59 ret = (mac_len == 32) ? 0 : -1;
61 case RTE_CRYPTO_AUTH_SHA384:
62 case RTE_CRYPTO_AUTH_SHA384_HMAC:
63 ret = (mac_len == 48) ? 0 : -1;
65 case RTE_CRYPTO_AUTH_SHA512:
66 case RTE_CRYPTO_AUTH_SHA512_HMAC:
67 ret = (mac_len == 64) ? 0 : -1;
69 case RTE_CRYPTO_AUTH_NULL:
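/*
 * Illustrative use of cpt_mac_len_verify() (hypothetical caller, not from
 * this file): reject a session whose digest length does not match the
 * algorithm's expected MAC size.
 *
 *	struct rte_crypto_auth_xform ax = {
 *		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.digest_length = 20,
 *	};
 *
 *	if (cpt_mac_len_verify(&ax) != 0)
 *		return -ENOTSUP;
 */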
79 static __rte_always_inline void
80 cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
83 mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
84 memcpy(fctx->enc.encr_iv, salt, 4);
87 static __rte_always_inline int
88 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
100 static __rte_always_inline int
101 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
117 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
125 key_len = key_len / 2;
126 if (unlikely(key_len == 24)) {
127 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
130 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
136 if (unlikely(key_len != 16))
138 /* No support for AEAD yet */
139 if (unlikely(ctx->hash_type))
141 fc_type = ZUC_SNOW3G;
145 if (unlikely(key_len != 16))
147 /* No support for AEAD yet */
148 if (unlikely(ctx->hash_type))
156 ctx->fc_type = fc_type;
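/*
 * Illustrative outcome (assumption based on the switch above): AES_XTS
 * with a 32B combined key validates each 16B half before accepting it,
 * while SNOW3G_UEA2 or ZUC_EEA3 with a 16B key selects
 * fc_type = ZUC_SNOW3G.
 */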
160 static __rte_always_inline void
161 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
163 cpt_ctx->enc_cipher = 0;
164 fctx->enc.enc_cipher = 0;
167 static __rte_always_inline void
168 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
170 mc_aes_type_t aes_key_type = 0;
173 aes_key_type = AES_128_BIT;
176 aes_key_type = AES_192_BIT;
179 aes_key_type = AES_256_BIT;
182 /* This should not happen */
183 CPT_LOG_DP_ERR("Invalid AES key len");
186 fctx->enc.aes_key = aes_key_type;
189 static __rte_always_inline void
190 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
193 mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
197 gen_key_snow3g(key, keyx);
198 memcpy(zs_ctx->ci_key, keyx, key_len);
199 cpt_ctx->zsk_flags = 0;
202 static __rte_always_inline void
203 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
206 mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
209 memcpy(zs_ctx->ci_key, key, key_len);
210 memcpy(zs_ctx->zuc_const, zuc_d, 32);
211 cpt_ctx->zsk_flags = 0;
214 static __rte_always_inline void
215 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
218 mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
221 memcpy(k_ctx->ci_key, key, key_len);
222 cpt_ctx->zsk_flags = 0;
225 static __rte_always_inline void
226 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
229 mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
231 memcpy(k_ctx->ci_key, key, key_len);
232 cpt_ctx->zsk_flags = 0;
235 static __rte_always_inline int
236 cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
237 const uint8_t *key, uint16_t key_len, uint8_t *salt)
239 mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
242 ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
246 if (cpt_ctx->fc_type == FC_GEN) {
248 * We need to always say IV is from DPTR as the user can
249 * sometimes override the IV per operation.
251 fctx->enc.iv_source = CPT_FROM_DPTR;
253 if (cpt_ctx->auth_key_len > 64)
259 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
262 /* CPT performs DES using 3DES, with the 8B DES key
263 * replicated twice more to match the 24B 3DES key.
264 * E.g., if the original key is "0x0a 0x0b", the new key is
265 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
268 /* Skipping the first 8B as it will be copied
269 * in the regular code flow
271 memcpy(fctx->enc.encr_key+key_len, key, key_len);
272 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
276 /* For DES3_ECB IV need to be from CTX. */
277 fctx->enc.iv_source = CPT_FROM_CTX;
284 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
287 /* Even though the IV source is DPTR,
288 * the AES-GCM salt is taken from the CTX
291 memcpy(fctx->enc.encr_iv, salt, 4);
292 /* Assuming it was just salt update
298 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
301 key_len = key_len / 2;
302 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
304 /* Copy key2 for XTS into ipad */
305 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
306 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
309 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
312 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
315 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
318 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
324 /* Only for FC_GEN case */
326 /* For GMAC auth, cipher must be NULL */
327 if (cpt_ctx->hash_type != GMAC_TYPE)
328 fctx->enc.enc_cipher = type;
330 memcpy(fctx->enc.encr_key, key, key_len);
333 cpt_ctx->enc_cipher = type;
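/*
 * Illustrative sketch (hypothetical caller): programming an AES-128-CBC
 * key into a context. The salt argument is consumed on the AES-GCM path,
 * so NULL is passed here.
 *
 *	static const uint8_t key[16] = { 0 };	// placeholder key
 *
 *	if (unlikely(cpt_fc_ciph_set_key(cpt_ctx, AES_CBC, key,
 *					 sizeof(key), NULL)))
 *		return -1;
 */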
338 static __rte_always_inline uint32_t
339 fill_sg_comp(sg_comp_t *list,
341 phys_addr_t dma_addr,
344 sg_comp_t *to = &list[i>>2];
346 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
347 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
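/*
 * Illustrative note: components are packed four to an sg_comp_t, so entry
 * i lands in list[i >> 2] at slot i % 4, with length and pointer stored
 * big endian for the microcode. A hypothetical caller chains entries via
 * the returned index:
 *
 *	uint32_t idx = 0;
 *
 *	idx = fill_sg_comp(gather_comp, idx, hdr_dma, 16);	// slot 0
 *	idx = fill_sg_comp(gather_comp, idx, buf_dma, 64);	// slot 1
 */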
352 static __rte_always_inline uint32_t
353 fill_sg_comp_from_buf(sg_comp_t *list,
357 sg_comp_t *to = &list[i>>2];
359 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
360 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
365 static __rte_always_inline uint32_t
366 fill_sg_comp_from_buf_min(sg_comp_t *list,
371 sg_comp_t *to = &list[i >> 2];
372 uint32_t size = *psize;
375 e_len = (size > from->size) ? from->size : size;
376 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
377 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
384 * This fills the MC expected SGIO list
385 * from IOV given by user.
387 static __rte_always_inline uint32_t
388 fill_sg_comp_from_iov(sg_comp_t *list,
390 iov_ptr_t *from, uint32_t from_offset,
391 uint32_t *psize, buf_ptr_t *extra_buf,
392 uint32_t extra_offset)
395 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
396 uint32_t size = *psize;
400 for (j = 0; (j < from->buf_cnt) && size; j++) {
401 phys_addr_t e_dma_addr;
403 sg_comp_t *to = &list[i >> 2];
405 if (unlikely(from_offset)) {
406 if (from_offset >= bufs[j].size) {
407 from_offset -= bufs[j].size;
410 e_dma_addr = bufs[j].dma_addr + from_offset;
411 e_len = (size > (bufs[j].size - from_offset)) ?
412 (bufs[j].size - from_offset) : size;
415 e_dma_addr = bufs[j].dma_addr;
416 e_len = (size > bufs[j].size) ?
420 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
421 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
423 if (extra_len && (e_len >= extra_offset)) {
424 /* Break the data at given offset */
425 uint32_t next_len = e_len - extra_offset;
426 phys_addr_t next_dma = e_dma_addr + extra_offset;
431 e_len = extra_offset;
433 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
436 extra_len = RTE_MIN(extra_len, size);
437 /* Insert extra data ptr */
442 rte_cpu_to_be_16(extra_len);
444 rte_cpu_to_be_64(extra_buf->dma_addr);
448 next_len = RTE_MIN(next_len, size);
449 /* insert the rest of the data */
453 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
454 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
463 extra_offset -= size;
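/*
 * Illustrative sketch (hypothetical buffers): flatten a multi-segment IOV
 * into the gather list, skipping the first 8B of the source and with no
 * extra buffer spliced in.
 *
 *	uint32_t remaining = total_len;		// bytes still to map
 *	uint32_t idx = 0;
 *
 *	idx = fill_sg_comp_from_iov(gather_comp, idx, params->src_iov,
 *				    8, &remaining, NULL, 0);
 *	if (unlikely(remaining))	// non-zero means the IOV was short
 *		return;
 */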
471 static __rte_always_inline void
472 cpt_digest_gen_prep(uint32_t flags,
474 digest_params_t *params,
478 struct cpt_request_info *req;
480 uint16_t data_len, mac_len, key_len;
481 auth_type_t hash_type;
484 sg_comp_t *gather_comp;
485 sg_comp_t *scatter_comp;
487 uint32_t g_size_bytes, s_size_bytes;
488 uint64_t dptr_dma, rptr_dma;
489 vq_cmd_word0_t vq_cmd_w0;
490 void *c_vaddr, *m_vaddr;
491 uint64_t c_dma, m_dma;
493 ctx = params->ctx_buf.vaddr;
494 meta_p = ¶ms->meta_buf;
496 m_vaddr = meta_p->vaddr;
497 m_dma = meta_p->dma_addr;
500 * Save initial space that followed app data for completion code &
501 * alternate completion code to fall in the same cache line as app data
503 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
504 m_dma += COMPLETION_CODE_SIZE;
505 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
507 c_vaddr = (uint8_t *)m_vaddr + size;
508 c_dma = m_dma + size;
509 size += sizeof(cpt_res_s_t);
511 m_vaddr = (uint8_t *)m_vaddr + size;
516 size = sizeof(struct cpt_request_info);
517 m_vaddr = (uint8_t *)m_vaddr + size;
520 hash_type = ctx->hash_type;
521 mac_len = ctx->mac_len;
522 key_len = ctx->auth_key_len;
523 data_len = AUTH_DLEN(d_lens);
526 vq_cmd_w0.s.opcode.minor = 0;
527 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
529 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
530 vq_cmd_w0.s.param1 = key_len;
531 vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
533 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
534 vq_cmd_w0.s.param1 = 0;
535 vq_cmd_w0.s.dlen = data_len;
538 /* Only the NULL-auth, NULL-cipher case enters this branch */
539 if (unlikely(!hash_type && !ctx->enc_cipher)) {
540 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
541 /* Minor op is passthrough */
542 vq_cmd_w0.s.opcode.minor = 0x03;
543 /* Send out completion code only */
544 vq_cmd_w0.s.param2 = 0x1;
547 /* DPTR has SG list */
551 ((uint16_t *)in_buffer)[0] = 0;
552 ((uint16_t *)in_buffer)[1] = 0;
554 /* TODO Add error check if space will be sufficient */
555 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
564 uint64_t k_dma = params->ctx_buf.dma_addr +
565 offsetof(struct cpt_ctx, auth_key);
567 i = fill_sg_comp(gather_comp, i, k_dma,
568 RTE_ALIGN_CEIL(key_len, 8));
574 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
576 if (unlikely(size)) {
577 CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
583 * Looks like we need to support zero data
584 * gather ptr in case of hash & hmac
588 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
589 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
596 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
598 if (flags & VALID_MAC_BUF) {
599 if (unlikely(params->mac_buf.size < mac_len)) {
600 CPT_LOG_DP_ERR("Insufficient MAC size");
605 i = fill_sg_comp_from_buf_min(scatter_comp, i,
606 ¶ms->mac_buf, &size);
609 i = fill_sg_comp_from_iov(scatter_comp, i,
610 params->src_iov, data_len,
612 if (unlikely(size)) {
613 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
619 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
620 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
622 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
624 /* This is the DPTR len in case of SG mode */
625 vq_cmd_w0.s.dlen = size;
627 m_vaddr = (uint8_t *)m_vaddr + size;
630 /* cpt alternate completion address saved earlier */
631 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
632 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
633 rptr_dma = c_dma - 8;
635 req->ist.ei1 = dptr_dma;
636 req->ist.ei2 = rptr_dma;
638 /* 16 byte aligned cpt res address */
639 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
640 *req->completion_addr = COMPLETION_CODE_INIT;
641 req->comp_baddr = c_dma;
643 /* Fill microcode part of instruction */
644 req->ist.ei0 = vq_cmd_w0.u64;
652 static __rte_always_inline void
653 cpt_enc_hmac_prep(uint32_t flags,
656 fc_params_t *fc_params,
660 uint32_t iv_offset = 0;
661 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
662 struct cpt_ctx *cpt_ctx;
663 uint32_t cipher_type, hash_type;
664 uint32_t mac_len, size;
666 struct cpt_request_info *req;
667 buf_ptr_t *meta_p, *aad_buf = NULL;
668 uint32_t encr_offset, auth_offset;
669 uint32_t encr_data_len, auth_data_len, aad_len = 0;
670 uint32_t passthrough_len = 0;
671 void *m_vaddr, *offset_vaddr;
672 uint64_t m_dma, offset_dma;
673 vq_cmd_word0_t vq_cmd_w0;
677 meta_p = &fc_params->meta_buf;
678 m_vaddr = meta_p->vaddr;
679 m_dma = meta_p->dma_addr;
681 encr_offset = ENCR_OFFSET(d_offs);
682 auth_offset = AUTH_OFFSET(d_offs);
683 encr_data_len = ENCR_DLEN(d_lens);
684 auth_data_len = AUTH_DLEN(d_lens);
685 if (unlikely(flags & VALID_AAD_BUF)) {
687 * We don't support both AAD
688 * and auth data given separately
692 aad_len = fc_params->aad_buf.size;
693 aad_buf = &fc_params->aad_buf;
695 cpt_ctx = fc_params->ctx_buf.vaddr;
696 cipher_type = cpt_ctx->enc_cipher;
697 hash_type = cpt_ctx->hash_type;
698 mac_len = cpt_ctx->mac_len;
701 * Save initial space that followed app data for completion code &
702 * alternate completion code to fall in the same cache line as app data
704 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
705 m_dma += COMPLETION_CODE_SIZE;
706 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
709 c_vaddr = (uint8_t *)m_vaddr + size;
710 c_dma = m_dma + size;
711 size += sizeof(cpt_res_s_t);
713 m_vaddr = (uint8_t *)m_vaddr + size;
716 /* start cpt request info struct at 8 byte boundary */
717 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
720 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
722 size += sizeof(struct cpt_request_info);
723 m_vaddr = (uint8_t *)m_vaddr + size;
726 if (unlikely(!(flags & VALID_IV_BUF))) {
728 iv_offset = ENCR_IV_OFFSET(d_offs);
731 if (unlikely(flags & VALID_AAD_BUF)) {
733 * When AAD is given, data above encr_offset is passed through.
734 * Since AAD is given as a separate pointer and not as an offset,
735 * this is a special case: we need to fragment the input data
736 * into passthrough + encr_data and then insert the AAD in between.
738 if (hash_type != GMAC_TYPE) {
739 passthrough_len = encr_offset;
740 auth_offset = passthrough_len + iv_len;
741 encr_offset = passthrough_len + aad_len + iv_len;
742 auth_data_len = aad_len + encr_data_len;
744 passthrough_len = 16 + aad_len;
745 auth_offset = passthrough_len + iv_len;
746 auth_data_len = aad_len;
749 encr_offset += iv_len;
750 auth_offset += iv_len;
754 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
755 vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_ENCRYPT;
756 vq_cmd_w0.s.opcode.minor |= (cpt_ctx->auth_enc <<
757 CPT_HMAC_FIRST_BIT_POS);
759 if (hash_type == GMAC_TYPE) {
764 auth_dlen = auth_offset + auth_data_len;
765 enc_dlen = encr_data_len + encr_offset;
766 if (unlikely(encr_data_len & 0xf)) {
767 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
768 enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
770 else if (likely((cipher_type == AES_CBC) ||
771 (cipher_type == AES_ECB)))
772 enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
776 if (unlikely(auth_dlen > enc_dlen)) {
777 inputlen = auth_dlen;
778 outputlen = auth_dlen + mac_len;
781 outputlen = enc_dlen + mac_len;
784 if (cpt_ctx->auth_enc != 0)
785 outputlen = enc_dlen;
788 vq_cmd_w0.s.param1 = encr_data_len;
789 vq_cmd_w0.s.param2 = auth_data_len;
791 * In 83XX we have a limitation: the IV & offset control word are
792 * not part of the instruction and need to be part of the data
793 * buffer, so we check whether head room is available and only
794 * then do the direct mode processing.
796 if (likely((flags & SINGLE_BUF_INPLACE) &&
797 (flags & SINGLE_BUF_HEADTAILROOM))) {
798 void *dm_vaddr = fc_params->bufs[0].vaddr;
799 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
801 * This flag indicates that there are 24 bytes of head room and
802 * 8 bytes of tail room available, so we get to do
803 * DIRECT MODE within that limitation
806 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
807 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
810 req->ist.ei1 = offset_dma;
811 /* RPTR should just exclude offset control word */
812 req->ist.ei2 = dm_dma_addr - iv_len;
813 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
814 + outputlen - iv_len);
816 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
818 if (likely(iv_len)) {
819 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
821 uint64_t *src = fc_params->iv_buf;
826 *(uint64_t *)offset_vaddr =
827 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
828 ((uint64_t)iv_offset << 8) |
829 ((uint64_t)auth_offset));
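/*
 * Worked example of the offset control word above (values hypothetical):
 * encr_offset = 24 (0x18), iv_offset = 8, auth_offset = 16 (0x10) gives
 * (0x18 << 16) | (8 << 8) | 0x10 = 0x0000000000180810 before the
 * big-endian conversion.
 */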
832 uint32_t i, g_size_bytes, s_size_bytes;
833 uint64_t dptr_dma, rptr_dma;
834 sg_comp_t *gather_comp;
835 sg_comp_t *scatter_comp;
838 /* This falls under strict SG mode */
839 offset_vaddr = m_vaddr;
841 size = OFF_CTRL_LEN + iv_len;
843 m_vaddr = (uint8_t *)m_vaddr + size;
846 vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
848 if (likely(iv_len)) {
849 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
851 uint64_t *src = fc_params->iv_buf;
856 *(uint64_t *)offset_vaddr =
857 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
858 ((uint64_t)iv_offset << 8) |
859 ((uint64_t)auth_offset));
861 /* DPTR has SG list */
865 ((uint16_t *)in_buffer)[0] = 0;
866 ((uint16_t *)in_buffer)[1] = 0;
868 /* TODO Add error check if space will be sufficient */
869 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
877 /* Offset control word that includes iv */
878 i = fill_sg_comp(gather_comp, i, offset_dma,
879 OFF_CTRL_LEN + iv_len);
882 size = inputlen - iv_len;
884 uint32_t aad_offset = aad_len ? passthrough_len : 0;
886 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
887 i = fill_sg_comp_from_buf_min(gather_comp, i,
891 i = fill_sg_comp_from_iov(gather_comp, i,
894 aad_buf, aad_offset);
897 if (unlikely(size)) {
898 CPT_LOG_DP_ERR("Insufficient buffer space,"
899 " size %d needed", size);
903 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
904 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
907 * Output Scatter list
911 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
914 if (likely(iv_len)) {
915 i = fill_sg_comp(scatter_comp, i,
916 offset_dma + OFF_CTRL_LEN,
920 /* output data or output data + digest*/
921 if (unlikely(flags & VALID_MAC_BUF)) {
922 size = outputlen - iv_len - mac_len;
924 uint32_t aad_offset =
925 aad_len ? passthrough_len : 0;
927 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
928 i = fill_sg_comp_from_buf_min(
934 i = fill_sg_comp_from_iov(scatter_comp,
942 if (unlikely(size)) {
943 CPT_LOG_DP_ERR("Insufficient buffer"
944 " space, size %d needed",
951 i = fill_sg_comp_from_buf(scatter_comp, i,
952 &fc_params->mac_buf);
955 /* Output including mac */
956 size = outputlen - iv_len;
958 uint32_t aad_offset =
959 aad_len ? passthrough_len : 0;
961 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
962 i = fill_sg_comp_from_buf_min(
968 i = fill_sg_comp_from_iov(scatter_comp,
976 if (unlikely(size)) {
977 CPT_LOG_DP_ERR("Insufficient buffer"
978 " space, size %d needed",
984 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
985 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
987 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
989 /* This is the DPTR len in case of SG mode */
990 vq_cmd_w0.s.dlen = size;
992 m_vaddr = (uint8_t *)m_vaddr + size;
995 /* cpt alternate completion address saved earlier */
996 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
997 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
998 rptr_dma = c_dma - 8;
1000 req->ist.ei1 = dptr_dma;
1001 req->ist.ei2 = rptr_dma;
1004 if (unlikely((encr_offset >> 16) ||
1006 (auth_offset >> 8))) {
1007 CPT_LOG_DP_ERR("Offset not supported");
1008 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1009 CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1010 CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1014 /* 16 byte aligned cpt res address */
1015 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1016 *req->completion_addr = COMPLETION_CODE_INIT;
1017 req->comp_baddr = c_dma;
1019 /* Fill microcode part of instruction */
1020 req->ist.ei0 = vq_cmd_w0.u64;
1028 static __rte_always_inline void
1029 cpt_dec_hmac_prep(uint32_t flags,
1032 fc_params_t *fc_params,
1036 uint32_t iv_offset = 0, size;
1037 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1038 struct cpt_ctx *cpt_ctx;
1039 int32_t hash_type, mac_len;
1040 uint8_t iv_len = 16;
1041 struct cpt_request_info *req;
1042 buf_ptr_t *meta_p, *aad_buf = NULL;
1043 uint32_t encr_offset, auth_offset;
1044 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1045 uint32_t passthrough_len = 0;
1046 void *m_vaddr, *offset_vaddr;
1047 uint64_t m_dma, offset_dma;
1048 vq_cmd_word0_t vq_cmd_w0;
1052 meta_p = &fc_params->meta_buf;
1053 m_vaddr = meta_p->vaddr;
1054 m_dma = meta_p->dma_addr;
1056 encr_offset = ENCR_OFFSET(d_offs);
1057 auth_offset = AUTH_OFFSET(d_offs);
1058 encr_data_len = ENCR_DLEN(d_lens);
1059 auth_data_len = AUTH_DLEN(d_lens);
1061 if (unlikely(flags & VALID_AAD_BUF)) {
1063 * We don't support both AAD
1064 * and auth data given separately
1068 aad_len = fc_params->aad_buf.size;
1069 aad_buf = &fc_params->aad_buf;
1072 cpt_ctx = fc_params->ctx_buf.vaddr;
1073 hash_type = cpt_ctx->hash_type;
1074 mac_len = cpt_ctx->mac_len;
1076 if (unlikely(!(flags & VALID_IV_BUF))) {
1078 iv_offset = ENCR_IV_OFFSET(d_offs);
1081 if (unlikely(flags & VALID_AAD_BUF)) {
1083 * When AAD is given, data above encr_offset is passed through.
1084 * Since AAD is given as a separate pointer and not as an offset,
1085 * this is a special case: we need to fragment the input data
1086 * into passthrough + encr_data and then insert the AAD in between.
1088 if (hash_type != GMAC_TYPE) {
1089 passthrough_len = encr_offset;
1090 auth_offset = passthrough_len + iv_len;
1091 encr_offset = passthrough_len + aad_len + iv_len;
1092 auth_data_len = aad_len + encr_data_len;
1094 passthrough_len = 16 + aad_len;
1095 auth_offset = passthrough_len + iv_len;
1096 auth_data_len = aad_len;
1099 encr_offset += iv_len;
1100 auth_offset += iv_len;
1104 * Save initial space that followed app data for completion code &
1105 * alternate completion code to fall in the same cache line as app data
1107 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1108 m_dma += COMPLETION_CODE_SIZE;
1109 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1111 c_vaddr = (uint8_t *)m_vaddr + size;
1112 c_dma = m_dma + size;
1113 size += sizeof(cpt_res_s_t);
1115 m_vaddr = (uint8_t *)m_vaddr + size;
1118 /* start cpt request info structure at 8 byte alignment */
1119 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1122 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1124 size += sizeof(struct cpt_request_info);
1125 m_vaddr = (uint8_t *)m_vaddr + size;
1129 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
1130 vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_DECRYPT;
1131 vq_cmd_w0.s.opcode.minor |= (cpt_ctx->dec_auth <<
1132 CPT_HMAC_FIRST_BIT_POS);
1134 if (hash_type == GMAC_TYPE) {
1139 enc_dlen = encr_offset + encr_data_len;
1140 auth_dlen = auth_offset + auth_data_len;
1142 if (auth_dlen > enc_dlen) {
1143 inputlen = auth_dlen + mac_len;
1144 outputlen = auth_dlen;
1146 inputlen = enc_dlen + mac_len;
1147 outputlen = enc_dlen;
1150 if (cpt_ctx->dec_auth != 0)
1151 outputlen = inputlen = enc_dlen;
1153 vq_cmd_w0.s.param1 = encr_data_len;
1154 vq_cmd_w0.s.param2 = auth_data_len;
1157 * In 83XX we have a limitation: the IV & offset control word are
1158 * not part of the instruction and need to be part of the data
1159 * buffer, so we check whether head room is available and only
1160 * then do the direct mode processing.
1162 if (likely((flags & SINGLE_BUF_INPLACE) &&
1163 (flags & SINGLE_BUF_HEADTAILROOM))) {
1164 void *dm_vaddr = fc_params->bufs[0].vaddr;
1165 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1167 * This flag indicates that there are 24 bytes of head room and
1168 * 8 bytes of tail room available, so we get to do
1169 * DIRECT MODE within that limitation
1172 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1173 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1174 req->ist.ei1 = offset_dma;
1176 /* RPTR should just exclude offset control word */
1177 req->ist.ei2 = dm_dma_addr - iv_len;
1179 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1180 outputlen - iv_len);
1181 /* since this is decryption,
1182 * don't touch the content of the
1183 * alternate completion code space as it contains
1187 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1189 if (likely(iv_len)) {
1190 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1192 uint64_t *src = fc_params->iv_buf;
1197 if (unlikely((encr_offset >> 16) ||
1199 (auth_offset >> 8))) {
1200 CPT_LOG_DP_ERR("Offset not supported");
1201 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1202 CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1203 CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1207 *(uint64_t *)offset_vaddr =
1208 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1209 ((uint64_t)iv_offset << 8) |
1210 ((uint64_t)auth_offset));
1213 uint64_t dptr_dma, rptr_dma;
1214 uint32_t g_size_bytes, s_size_bytes;
1215 sg_comp_t *gather_comp;
1216 sg_comp_t *scatter_comp;
1220 /* This falls under strict SG mode */
1221 offset_vaddr = m_vaddr;
1223 size = OFF_CTRL_LEN + iv_len;
1225 m_vaddr = (uint8_t *)m_vaddr + size;
1228 vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1230 if (likely(iv_len)) {
1231 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1233 uint64_t *src = fc_params->iv_buf;
1238 if (unlikely((encr_offset >> 16) ||
1240 (auth_offset >> 8))) {
1241 CPT_LOG_DP_ERR("Offset not supported");
1242 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1243 CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1244 CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1248 *(uint64_t *)offset_vaddr =
1249 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1250 ((uint64_t)iv_offset << 8) |
1251 ((uint64_t)auth_offset));
1253 /* DPTR has SG list */
1254 in_buffer = m_vaddr;
1257 ((uint16_t *)in_buffer)[0] = 0;
1258 ((uint16_t *)in_buffer)[1] = 0;
1260 /* TODO Add error check if space will be sufficient */
1261 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1268 /* Offset control word that includes iv */
1269 i = fill_sg_comp(gather_comp, i, offset_dma,
1270 OFF_CTRL_LEN + iv_len);
1272 /* Add input data */
1273 if (flags & VALID_MAC_BUF) {
1274 size = inputlen - iv_len - mac_len;
1276 /* input data only */
1277 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1278 i = fill_sg_comp_from_buf_min(
1283 uint32_t aad_offset = aad_len ?
1284 passthrough_len : 0;
1286 i = fill_sg_comp_from_iov(gather_comp,
1293 if (unlikely(size)) {
1294 CPT_LOG_DP_ERR("Insufficient buffer"
1295 " space, size %d needed",
1303 i = fill_sg_comp_from_buf(gather_comp, i,
1304 &fc_params->mac_buf);
1307 /* input data + mac */
1308 size = inputlen - iv_len;
1310 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1311 i = fill_sg_comp_from_buf_min(
1316 uint32_t aad_offset = aad_len ?
1317 passthrough_len : 0;
1319 if (unlikely(!fc_params->src_iov)) {
1320 CPT_LOG_DP_ERR("Bad input args");
1324 i = fill_sg_comp_from_iov(
1332 if (unlikely(size)) {
1333 CPT_LOG_DP_ERR("Insufficient buffer"
1334 " space, size %d needed",
1340 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1341 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1344 * Output Scatter List
1349 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1353 i = fill_sg_comp(scatter_comp, i,
1354 offset_dma + OFF_CTRL_LEN,
1358 /* Add output data */
1359 size = outputlen - iv_len;
1361 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1362 /* handle single buffer here */
1363 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1367 uint32_t aad_offset = aad_len ?
1368 passthrough_len : 0;
1370 if (unlikely(!fc_params->dst_iov)) {
1371 CPT_LOG_DP_ERR("Bad input args");
1375 i = fill_sg_comp_from_iov(scatter_comp, i,
1376 fc_params->dst_iov, 0,
1381 if (unlikely(size)) {
1382 CPT_LOG_DP_ERR("Insufficient buffer space,"
1383 " size %d needed", size);
1388 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1389 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1391 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1393 /* This is the DPTR len in case of SG mode */
1394 vq_cmd_w0.s.dlen = size;
1396 m_vaddr = (uint8_t *)m_vaddr + size;
1399 /* cpt alternate completion address saved earlier */
1400 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1401 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1402 rptr_dma = c_dma - 8;
1403 size += COMPLETION_CODE_SIZE;
1405 req->ist.ei1 = dptr_dma;
1406 req->ist.ei2 = rptr_dma;
1409 /* 16 byte aligned cpt res address */
1410 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1411 *req->completion_addr = COMPLETION_CODE_INIT;
1412 req->comp_baddr = c_dma;
1414 /* Fill microcode part of instruction */
1415 req->ist.ei0 = vq_cmd_w0.u64;
1423 static __rte_always_inline void
1424 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1427 fc_params_t *params,
1432 int32_t inputlen, outputlen;
1433 struct cpt_ctx *cpt_ctx;
1434 uint32_t mac_len = 0;
1436 struct cpt_request_info *req;
1438 uint32_t encr_offset = 0, auth_offset = 0;
1439 uint32_t encr_data_len = 0, auth_data_len = 0;
1440 int flags, iv_len = 16;
1441 void *m_vaddr, *c_vaddr;
1442 uint64_t m_dma, c_dma, offset_ctrl;
1443 uint64_t *offset_vaddr, offset_dma;
1444 uint32_t *iv_s, iv[4];
1445 vq_cmd_word0_t vq_cmd_w0;
1447 buf_p = ¶ms->meta_buf;
1448 m_vaddr = buf_p->vaddr;
1449 m_dma = buf_p->dma_addr;
1451 cpt_ctx = params->ctx_buf.vaddr;
1452 flags = cpt_ctx->zsk_flags;
1453 mac_len = cpt_ctx->mac_len;
1454 snow3g = cpt_ctx->snow3g;
1457 * Save initial space that followed app data for completion code &
1458 * alternate completion code to fall in the same cache line as app data
1460 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1461 m_dma += COMPLETION_CODE_SIZE;
1462 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1465 c_vaddr = (uint8_t *)m_vaddr + size;
1466 c_dma = m_dma + size;
1467 size += sizeof(cpt_res_s_t);
1469 m_vaddr = (uint8_t *)m_vaddr + size;
1472 /* Reserve memory for cpt request info */
1475 size = sizeof(struct cpt_request_info);
1476 m_vaddr = (uint8_t *)m_vaddr + size;
1479 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1481 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1483 vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1484 (0 << 3) | (flags & 0x7));
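/*
 * Worked example of the minor opcode above (values hypothetical): for
 * SNOW3G cipher-only, snow3g = 1 and zsk_flags = 0, so the minor opcode
 * is (1 << 7) | (1 << 5) = 0xA0.
 */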
1488 * Microcode expects offsets in bytes
1489 * TODO: Rounding off
1491 auth_data_len = AUTH_DLEN(d_lens);
1494 auth_offset = AUTH_OFFSET(d_offs);
1495 auth_offset = auth_offset / 8;
1497 /* consider iv len */
1498 auth_offset += iv_len;
1500 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1501 outputlen = mac_len;
1503 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1508 * Microcode expects offsets in bytes
1509 * TODO: Rounding off
1511 encr_data_len = ENCR_DLEN(d_lens);
1513 encr_offset = ENCR_OFFSET(d_offs);
1514 encr_offset = encr_offset / 8;
1515 /* consider iv len */
1516 encr_offset += iv_len;
1518 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1519 outputlen = inputlen;
1521 /* iv offset is 0 */
1522 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1525 if (unlikely((encr_offset >> 16) ||
1526 (auth_offset >> 8))) {
1527 CPT_LOG_DP_ERR("Offset not supported");
1528 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1529 CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1534 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1539 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0
1540 * in big endian; the MC needs it as IV0 IV1 IV2 IV3
1543 for (j = 0; j < 4; j++)
1544 iv[j] = iv_s[3 - j];
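/* e.g. a hypothetical IV of words {w0, w1, w2, w3} becomes {w3, w2, w1, w0} */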
1546 /* ZUC doesn't need a swap */
1547 for (j = 0; j < 4; j++)
1552 * GP op header, lengths are expected in bits.
1554 vq_cmd_w0.s.param1 = encr_data_len;
1555 vq_cmd_w0.s.param2 = auth_data_len;
1558 * In 83XX we have a limitation: the IV & offset control word are
1559 * not part of the instruction and need to be part of the data
1560 * buffer, so we check whether head room is available and only
1561 * then do the direct mode processing.
1563 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1564 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1565 void *dm_vaddr = params->bufs[0].vaddr;
1566 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1568 * This flag indicates that there are 24 bytes of head room and
1569 * 8 bytes of tail room available, so we get to do
1570 * DIRECT MODE within that limitation
1573 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1574 OFF_CTRL_LEN - iv_len);
1575 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1578 req->ist.ei1 = offset_dma;
1579 /* RPTR should just exclude offset control word */
1580 req->ist.ei2 = dm_dma_addr - iv_len;
1581 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1582 + outputlen - iv_len);
1584 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1586 if (likely(iv_len)) {
1587 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1589 memcpy(iv_d, iv, 16);
1592 *offset_vaddr = offset_ctrl;
1594 uint32_t i, g_size_bytes, s_size_bytes;
1595 uint64_t dptr_dma, rptr_dma;
1596 sg_comp_t *gather_comp;
1597 sg_comp_t *scatter_comp;
1601 /* save space for iv */
1602 offset_vaddr = m_vaddr;
1605 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1606 m_dma += OFF_CTRL_LEN + iv_len;
1608 vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1610 /* DPTR has SG list */
1611 in_buffer = m_vaddr;
1614 ((uint16_t *)in_buffer)[0] = 0;
1615 ((uint16_t *)in_buffer)[1] = 0;
1617 /* TODO Add error check if space will be sufficient */
1618 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1625 /* Offset control word followed by iv */
1627 i = fill_sg_comp(gather_comp, i, offset_dma,
1628 OFF_CTRL_LEN + iv_len);
1630 /* iv offset is 0 */
1631 *offset_vaddr = offset_ctrl;
1633 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1634 memcpy(iv_d, iv, 16);
1637 size = inputlen - iv_len;
1639 i = fill_sg_comp_from_iov(gather_comp, i,
1642 if (unlikely(size)) {
1643 CPT_LOG_DP_ERR("Insufficient buffer space,"
1644 " size %d needed", size);
1648 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1649 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1652 * Output Scatter List
1657 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1660 /* IV in SLIST only for EEA3 & UEA2 */
1665 i = fill_sg_comp(scatter_comp, i,
1666 offset_dma + OFF_CTRL_LEN, iv_len);
1669 /* Add output data */
1670 if (req_flags & VALID_MAC_BUF) {
1671 size = outputlen - iv_len - mac_len;
1673 i = fill_sg_comp_from_iov(scatter_comp, i,
1677 if (unlikely(size)) {
1678 CPT_LOG_DP_ERR("Insufficient buffer space,"
1679 " size %d needed", size);
1686 i = fill_sg_comp_from_buf(scatter_comp, i,
1690 /* Output including mac */
1691 size = outputlen - iv_len;
1693 i = fill_sg_comp_from_iov(scatter_comp, i,
1697 if (unlikely(size)) {
1698 CPT_LOG_DP_ERR("Insufficient buffer space,"
1699 " size %d needed", size);
1704 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1705 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1707 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1709 /* This is the DPTR len in case of SG mode */
1710 vq_cmd_w0.s.dlen = size;
1712 m_vaddr = (uint8_t *)m_vaddr + size;
1715 /* cpt alternate completion address saved earlier */
1716 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1717 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1718 rptr_dma = c_dma - 8;
1720 req->ist.ei1 = dptr_dma;
1721 req->ist.ei2 = rptr_dma;
1724 /* 16 byte aligned cpt res address */
1725 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1726 *req->completion_addr = COMPLETION_CODE_INIT;
1727 req->comp_baddr = c_dma;
1729 /* Fill microcode part of instruction */
1730 req->ist.ei0 = vq_cmd_w0.u64;
1738 static __rte_always_inline void
1739 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1742 fc_params_t *params,
1747 int32_t inputlen = 0, outputlen;
1748 struct cpt_ctx *cpt_ctx;
1749 uint8_t snow3g, iv_len = 16;
1750 struct cpt_request_info *req;
1752 uint32_t encr_offset;
1753 uint32_t encr_data_len;
1755 void *m_vaddr, *c_vaddr;
1756 uint64_t m_dma, c_dma;
1757 uint64_t *offset_vaddr, offset_dma;
1758 uint32_t *iv_s, iv[4], j;
1759 vq_cmd_word0_t vq_cmd_w0;
1761 buf_p = ¶ms->meta_buf;
1762 m_vaddr = buf_p->vaddr;
1763 m_dma = buf_p->dma_addr;
1766 * Microcode expects offsets in bytes
1767 * TODO: Rounding off
1769 encr_offset = ENCR_OFFSET(d_offs) / 8;
1770 encr_data_len = ENCR_DLEN(d_lens);
1772 cpt_ctx = params->ctx_buf.vaddr;
1773 flags = cpt_ctx->zsk_flags;
1774 snow3g = cpt_ctx->snow3g;
1776 * Save initial space that followed app data for completion code &
1777 * alternate completion code to fall in the same cache line as app data
1779 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1780 m_dma += COMPLETION_CODE_SIZE;
1781 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1784 c_vaddr = (uint8_t *)m_vaddr + size;
1785 c_dma = m_dma + size;
1786 size += sizeof(cpt_res_s_t);
1788 m_vaddr = (uint8_t *)m_vaddr + size;
1791 /* Reserve memory for cpt request info */
1794 size = sizeof(struct cpt_request_info);
1795 m_vaddr = (uint8_t *)m_vaddr + size;
1799 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1801 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1803 vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1804 (0 << 3) | (flags & 0x7));
1806 /* consider iv len */
1807 encr_offset += iv_len;
1809 inputlen = encr_offset +
1810 (RTE_ALIGN(encr_data_len, 8) / 8);
1811 outputlen = inputlen;
1814 iv_s = params->iv_buf;
1817 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0
1818 * in big endian; the MC needs it as IV0 IV1 IV2 IV3
1821 for (j = 0; j < 4; j++)
1822 iv[j] = iv_s[3 - j];
1824 /* ZUC doesn't need a swap */
1825 for (j = 0; j < 4; j++)
1830 * GP op header, lengths are expected in bits.
1832 vq_cmd_w0.s.param1 = encr_data_len;
1835 * In 83XX we have a limitation: the IV & offset control word are
1836 * not part of the instruction and need to be part of the data
1837 * buffer, so we check whether head room is available and only
1838 * then do the direct mode processing.
1840 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1841 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1842 void *dm_vaddr = params->bufs[0].vaddr;
1843 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1845 * This flag indicates that there are 24 bytes of head room and
1846 * 8 bytes of tail room available, so we get to do
1847 * DIRECT MODE within that limitation
1850 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1851 OFF_CTRL_LEN - iv_len);
1852 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1855 req->ist.ei1 = offset_dma;
1856 /* RPTR should just exclude offset control word */
1857 req->ist.ei2 = dm_dma_addr - iv_len;
1858 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1859 + outputlen - iv_len);
1861 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1863 if (likely(iv_len)) {
1864 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1866 memcpy(iv_d, iv, 16);
1869 /* iv offset is 0 */
1870 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1872 uint32_t i, g_size_bytes, s_size_bytes;
1873 uint64_t dptr_dma, rptr_dma;
1874 sg_comp_t *gather_comp;
1875 sg_comp_t *scatter_comp;
1879 /* save space for offset and iv... */
1880 offset_vaddr = m_vaddr;
1883 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1884 m_dma += OFF_CTRL_LEN + iv_len;
1886 vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1888 /* DPTR has SG list */
1889 in_buffer = m_vaddr;
1892 ((uint16_t *)in_buffer)[0] = 0;
1893 ((uint16_t *)in_buffer)[1] = 0;
1895 /* TODO Add error check if space will be sufficient */
1896 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1903 /* Offset control word */
1905 /* iv offset is 0 */
1906 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1908 i = fill_sg_comp(gather_comp, i, offset_dma,
1909 OFF_CTRL_LEN + iv_len);
1911 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1912 memcpy(iv_d, iv, 16);
1914 /* Add input data */
1915 size = inputlen - iv_len;
1917 i = fill_sg_comp_from_iov(gather_comp, i,
1920 if (unlikely(size)) {
1921 CPT_LOG_DP_ERR("Insufficient buffer space,"
1922 " size %d needed", size);
1926 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1927 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1930 * Output Scatter List
1935 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1938 i = fill_sg_comp(scatter_comp, i,
1939 offset_dma + OFF_CTRL_LEN,
1942 /* Add output data */
1943 size = outputlen - iv_len;
1945 i = fill_sg_comp_from_iov(scatter_comp, i,
1949 if (unlikely(size)) {
1950 CPT_LOG_DP_ERR("Insufficient buffer space,"
1951 " size %d needed", size);
1955 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1956 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1958 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1960 /* This is the DPTR len in case of SG mode */
1961 vq_cmd_w0.s.dlen = size;
1963 m_vaddr = (uint8_t *)m_vaddr + size;
1966 /* cpt alternate completion address saved earlier */
1967 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1968 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1969 rptr_dma = c_dma - 8;
1971 req->ist.ei1 = dptr_dma;
1972 req->ist.ei2 = rptr_dma;
1975 if (unlikely((encr_offset >> 16))) {
1976 CPT_LOG_DP_ERR("Offset not supported");
1977 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1981 /* 16 byte aligned cpt res address */
1982 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1983 *req->completion_addr = COMPLETION_CODE_INIT;
1984 req->comp_baddr = c_dma;
1986 /* Fill microcode part of instruction */
1987 req->ist.ei0 = vq_cmd_w0.u64;
1995 static __rte_always_inline void
1996 cpt_kasumi_enc_prep(uint32_t req_flags,
1999 fc_params_t *params,
2004 int32_t inputlen = 0, outputlen = 0;
2005 struct cpt_ctx *cpt_ctx;
2006 uint32_t mac_len = 0;
2008 struct cpt_request_info *req;
2010 uint32_t encr_offset, auth_offset;
2011 uint32_t encr_data_len, auth_data_len;
2013 uint8_t *iv_s, *iv_d, iv_len = 8;
2015 void *m_vaddr, *c_vaddr;
2016 uint64_t m_dma, c_dma;
2017 uint64_t *offset_vaddr, offset_dma;
2018 vq_cmd_word0_t vq_cmd_w0;
2020 uint32_t g_size_bytes, s_size_bytes;
2021 uint64_t dptr_dma, rptr_dma;
2022 sg_comp_t *gather_comp;
2023 sg_comp_t *scatter_comp;
2025 buf_p = ¶ms->meta_buf;
2026 m_vaddr = buf_p->vaddr;
2027 m_dma = buf_p->dma_addr;
2029 encr_offset = ENCR_OFFSET(d_offs) / 8;
2030 auth_offset = AUTH_OFFSET(d_offs) / 8;
2031 encr_data_len = ENCR_DLEN(d_lens);
2032 auth_data_len = AUTH_DLEN(d_lens);
2034 cpt_ctx = params->ctx_buf.vaddr;
2035 flags = cpt_ctx->zsk_flags;
2036 mac_len = cpt_ctx->mac_len;
2039 iv_s = params->iv_buf;
2041 iv_s = params->auth_iv_buf;
2043 dir = iv_s[8] & 0x1;
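/* The direction bit is carried in the LSB of the 9th IV byte (iv_s[8]) */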
2046 * Save initial space that followed app data for completion code &
2047 * alternate completion code to fall in the same cache line as app data
2049 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2050 m_dma += COMPLETION_CODE_SIZE;
2051 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2054 c_vaddr = (uint8_t *)m_vaddr + size;
2055 c_dma = m_dma + size;
2056 size += sizeof(cpt_res_s_t);
2058 m_vaddr = (uint8_t *)m_vaddr + size;
2061 /* Reserve memory for cpt request info */
2064 size = sizeof(struct cpt_request_info);
2065 m_vaddr = (uint8_t *)m_vaddr + size;
2068 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2070 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2071 vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2072 (dir << 4) | (0 << 3) | (flags & 0x7));
2075 * GP op header, lengths are expected in bits.
2077 vq_cmd_w0.s.param1 = encr_data_len;
2078 vq_cmd_w0.s.param2 = auth_data_len;
2080 /* consider iv len */
2082 encr_offset += iv_len;
2083 auth_offset += iv_len;
2086 /* save space for offset ctrl and iv */
2087 offset_vaddr = m_vaddr;
2090 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2091 m_dma += OFF_CTRL_LEN + iv_len;
2093 /* DPTR has SG list */
2094 in_buffer = m_vaddr;
2097 ((uint16_t *)in_buffer)[0] = 0;
2098 ((uint16_t *)in_buffer)[1] = 0;
2100 /* TODO Add error check if space will be sufficient */
2101 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2108 /* Offset control word followed by iv */
2111 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2112 outputlen = inputlen;
2113 /* iv offset is 0 */
2114 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2115 if (unlikely((encr_offset >> 16))) {
2116 CPT_LOG_DP_ERR("Offset not supported");
2117 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
2121 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2122 outputlen = mac_len;
2123 /* iv offset is 0 */
2124 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2125 if (unlikely((auth_offset >> 8))) {
2126 CPT_LOG_DP_ERR("Offset not supported");
2127 CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
2132 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2135 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2136 memcpy(iv_d, iv_s, iv_len);
2139 size = inputlen - iv_len;
2141 i = fill_sg_comp_from_iov(gather_comp, i,
2145 if (unlikely(size)) {
2146 CPT_LOG_DP_ERR("Insufficient buffer space,"
2147 " size %d needed", size);
2151 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2152 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2155 * Output Scatter List
2159 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2162 /* IV in SLIST only for F8 */
2168 i = fill_sg_comp(scatter_comp, i,
2169 offset_dma + OFF_CTRL_LEN,
2173 /* Add output data */
2174 if (req_flags & VALID_MAC_BUF) {
2175 size = outputlen - iv_len - mac_len;
2177 i = fill_sg_comp_from_iov(scatter_comp, i,
2181 if (unlikely(size)) {
2182 CPT_LOG_DP_ERR("Insufficient buffer space,"
2183 " size %d needed", size);
2190 i = fill_sg_comp_from_buf(scatter_comp, i,
2194 /* Output including mac */
2195 size = outputlen - iv_len;
2197 i = fill_sg_comp_from_iov(scatter_comp, i,
2201 if (unlikely(size)) {
2202 CPT_LOG_DP_ERR("Insufficient buffer space,"
2203 " size %d needed", size);
2208 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2209 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2211 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2213 /* This is the DPTR len in case of SG mode */
2214 vq_cmd_w0.s.dlen = size;
2216 m_vaddr = (uint8_t *)m_vaddr + size;
2219 /* cpt alternate completion address saved earlier */
2220 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2221 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2222 rptr_dma = c_dma - 8;
2224 req->ist.ei1 = dptr_dma;
2225 req->ist.ei2 = rptr_dma;
2227 /* 16 byte aligned cpt res address */
2228 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2229 *req->completion_addr = COMPLETION_CODE_INIT;
2230 req->comp_baddr = c_dma;
2232 /* Fill microcode part of instruction */
2233 req->ist.ei0 = vq_cmd_w0.u64;
2241 static __rte_always_inline void
2242 cpt_kasumi_dec_prep(uint64_t d_offs,
2244 fc_params_t *params,
2249 int32_t inputlen = 0, outputlen;
2250 struct cpt_ctx *cpt_ctx;
2251 uint8_t i = 0, iv_len = 8;
2252 struct cpt_request_info *req;
2254 uint32_t encr_offset;
2255 uint32_t encr_data_len;
2258 void *m_vaddr, *c_vaddr;
2259 uint64_t m_dma, c_dma;
2260 uint64_t *offset_vaddr, offset_dma;
2261 vq_cmd_word0_t vq_cmd_w0;
2263 uint32_t g_size_bytes, s_size_bytes;
2264 uint64_t dptr_dma, rptr_dma;
2265 sg_comp_t *gather_comp;
2266 sg_comp_t *scatter_comp;
2268 buf_p = ¶ms->meta_buf;
2269 m_vaddr = buf_p->vaddr;
2270 m_dma = buf_p->dma_addr;
2272 encr_offset = ENCR_OFFSET(d_offs) / 8;
2273 encr_data_len = ENCR_DLEN(d_lens);
2275 cpt_ctx = params->ctx_buf.vaddr;
2276 flags = cpt_ctx->zsk_flags;
2278 * Save initial space that followed app data for completion code &
2279 * alternate completion code to fall in the same cache line as app data
2281 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2282 m_dma += COMPLETION_CODE_SIZE;
2283 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2286 c_vaddr = (uint8_t *)m_vaddr + size;
2287 c_dma = m_dma + size;
2288 size += sizeof(cpt_res_s_t);
2290 m_vaddr = (uint8_t *)m_vaddr + size;
2293 /* Reserve memory for cpt request info */
2296 size = sizeof(struct cpt_request_info);
2297 m_vaddr = (uint8_t *)m_vaddr + size;
2301 vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2303 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2304 vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2305 (dir << 4) | (0 << 3) | (flags & 0x7));
2308 * GP op header, lengths are expected in bits.
2310 vq_cmd_w0.s.param1 = encr_data_len;
2312 /* consider iv len */
2313 encr_offset += iv_len;
2315 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2316 outputlen = inputlen;
2318 /* save space for offset ctrl & iv */
2319 offset_vaddr = m_vaddr;
2322 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2323 m_dma += OFF_CTRL_LEN + iv_len;
2325 /* DPTR has SG list */
2326 in_buffer = m_vaddr;
2329 ((uint16_t *)in_buffer)[0] = 0;
2330 ((uint16_t *)in_buffer)[1] = 0;
2332 /* TODO Add error check if space will be sufficient */
2333 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2340 /* Offset control word followed by iv */
2341 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2342 if (unlikely((encr_offset >> 16))) {
2343 CPT_LOG_DP_ERR("Offset not supported");
2344 CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
2348 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2351 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2352 params->iv_buf, iv_len);
2354 /* Add input data */
2355 size = inputlen - iv_len;
2357 i = fill_sg_comp_from_iov(gather_comp, i,
2360 if (unlikely(size)) {
2361 CPT_LOG_DP_ERR("Insufficient buffer space,"
2362 " size %d needed", size);
2366 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2367 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2370 * Output Scatter List
2374 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2377 i = fill_sg_comp(scatter_comp, i,
2378 offset_dma + OFF_CTRL_LEN,
2381 /* Add output data */
2382 size = outputlen - iv_len;
2384 i = fill_sg_comp_from_iov(scatter_comp, i,
2387 if (unlikely(size)) {
2388 CPT_LOG_DP_ERR("Insufficient buffer space,"
2389 " size %d needed", size);
2393 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2394 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2396 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2398 /* This is the DPTR len in case of SG mode */
2399 vq_cmd_w0.s.dlen = size;
2401 m_vaddr = (uint8_t *)m_vaddr + size;
2404 /* cpt alternate completion address saved earlier */
2405 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2406 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2407 rptr_dma = c_dma - 8;
2409 req->ist.ei1 = dptr_dma;
2410 req->ist.ei2 = rptr_dma;
2412 /* 16 byte aligned cpt res address */
2413 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2414 *req->completion_addr = COMPLETION_CODE_INIT;
2415 req->comp_baddr = c_dma;
2417 /* Fill microcode part of instruction */
2418 req->ist.ei0 = vq_cmd_w0.u64;
2426 static __rte_always_inline void *
2427 cpt_fc_dec_hmac_prep(uint32_t flags,
2430 fc_params_t *fc_params,
2433 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2435 void *prep_req = NULL;
2437 fc_type = ctx->fc_type;
2439 if (likely(fc_type == FC_GEN)) {
2440 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2442 } else if (fc_type == ZUC_SNOW3G) {
2443 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2445 } else if (fc_type == KASUMI) {
2446 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2450 * For the AUTH_ONLY case,
2451 * the MC only supports digest generation; verification
2452 * should be done in software by memcmp()
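/*
 * Illustrative sketch of that software verification (hypothetical names):
 *
 *	if (memcmp(gen_digest, exp_digest, mac_len) != 0)
 *		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 */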
2458 static __rte_always_inline void *__rte_hot
2459 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2460 fc_params_t *fc_params, void *op)
2462 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2464 void *prep_req = NULL;
2466 fc_type = ctx->fc_type;
2468 /* Common api for rest of the ops */
2469 if (likely(fc_type == FC_GEN)) {
2470 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2472 } else if (fc_type == ZUC_SNOW3G) {
2473 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2475 } else if (fc_type == KASUMI) {
2476 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2478 } else if (fc_type == HASH_HMAC) {
2479 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2485 static __rte_always_inline int
2486 cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
2487 const uint8_t *key, uint16_t key_len, uint16_t mac_len)
2489 mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
2490 mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
2491 mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
2493 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2498 /* No support for AEAD yet */
2499 if (cpt_ctx->enc_cipher)
2501 /* For ZUC/SNOW3G/Kasumi */
2504 cpt_ctx->snow3g = 1;
2505 gen_key_snow3g(key, keyx);
2506 memcpy(zs_ctx->ci_key, keyx, key_len);
2507 cpt_ctx->fc_type = ZUC_SNOW3G;
2508 cpt_ctx->zsk_flags = 0x1;
2511 cpt_ctx->snow3g = 0;
2512 memcpy(zs_ctx->ci_key, key, key_len);
2513 memcpy(zs_ctx->zuc_const, zuc_d, 32);
2514 cpt_ctx->fc_type = ZUC_SNOW3G;
2515 cpt_ctx->zsk_flags = 0x1;
2518 /* Kasumi ECB mode */
2520 memcpy(k_ctx->ci_key, key, key_len);
2521 cpt_ctx->fc_type = KASUMI;
2522 cpt_ctx->zsk_flags = 0x1;
2525 memcpy(k_ctx->ci_key, key, key_len);
2526 cpt_ctx->fc_type = KASUMI;
2527 cpt_ctx->zsk_flags = 0x1;
2532 cpt_ctx->mac_len = 4;
2533 cpt_ctx->hash_type = type;
2537 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2538 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2539 cpt_ctx->fc_type = HASH_HMAC;
2542 if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
2545 /* For GMAC auth, cipher must be NULL */
2546 if (type == GMAC_TYPE)
2547 fctx->enc.enc_cipher = 0;
2549 fctx->enc.hash_type = cpt_ctx->hash_type = type;
2550 fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
2554 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2555 memcpy(cpt_ctx->auth_key, key, key_len);
2556 cpt_ctx->auth_key_len = key_len;
2557 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2558 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2561 memcpy(fctx->hmac.opad, key, key_len);
2562 fctx->enc.auth_input_type = 1;
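/*
 * Illustrative sketch (hypothetical key): programming HMAC-SHA1 with a
 * 20B digest into a context.
 *
 *	static const uint8_t akey[20] = { 0 };	// placeholder key
 *
 *	if (unlikely(cpt_fc_auth_set_key(cpt_ctx, SHA1_TYPE, akey,
 *					 sizeof(akey), 20)))
 *		return -1;
 */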
2567 static __rte_always_inline int
2568 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2569 struct cpt_sess_misc *sess)
2571 struct rte_crypto_aead_xform *aead_form;
2572 cipher_type_t enc_type = 0; /* NULL Cipher type */
2573 auth_type_t auth_type = 0; /* NULL Auth type */
2574 uint32_t cipher_key_len = 0;
2575 uint8_t aes_gcm = 0;
2576 aead_form = &xform->aead;
2577 void *ctx = SESS_PRIV(sess);
2579 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
2580 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2581 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2582 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
2583 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2584 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2586 CPT_LOG_DP_ERR("Unknown AEAD operation");
2589 switch (aead_form->algo) {
2590 case RTE_CRYPTO_AEAD_AES_GCM:
2592 cipher_key_len = 16;
2595 case RTE_CRYPTO_AEAD_AES_CCM:
2596 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2599 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
2600 enc_type = CHACHA20;
2601 auth_type = POLY1305;
2602 cipher_key_len = 32;
2603 sess->chacha_poly = 1;
2606 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2610 if (aead_form->key.length < cipher_key_len) {
2611 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2612 (unsigned long)aead_form->key.length);
2616 sess->aes_gcm = aes_gcm;
2617 sess->mac_len = aead_form->digest_length;
2618 sess->iv_offset = aead_form->iv.offset;
2619 sess->iv_length = aead_form->iv.length;
2620 sess->aad_length = aead_form->aad_length;
2622 if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2623 aead_form->key.length, NULL)))
2626 if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
2627 aead_form->digest_length)))
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	struct cpt_ctx *ctx = SESS_PRIV(sess);
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		if (xform->next != NULL &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			/* Perform decryption followed by auth verify */
			ctx->dec_auth = 1;
		}
	} else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
			c_form->key.data, c_form->key.length, NULL)))
		return -1;

	return 0;
}

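/*
 * Populate CPT session state from an auth xform. For the wireless
 * algorithms (SNOW 3G UIA2, ZUC EIA3, KASUMI F9) zsk_flag records how
 * the auth IV and, for KASUMI F9, the trailing direction bit must be
 * handled when the request is built.
 */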
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct cpt_ctx *ctx = SESS_PRIV(sess);
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		ctx->auth_enc = 1;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
			a_form->key.data, a_form->key.length,
			a_form->digest_length)))
		return -1;

	return 0;
}

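/*
 * Populate CPT session state for AES-GMAC. GMAC reuses the AES-GCM
 * cipher context with a GMAC hash type, so the auth key is programmed
 * through the cipher key path.
 */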
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}

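/*
 * Allocate request meta data. Unless CPT_ALWAYS_USE_SEPARATE_BUF is
 * defined, the tailroom of a single-segment source mbuf is preferred
 * over a mempool get; bit 0 of the returned pointer tags mbuf-backed
 * meta data so that free_op_meta() knows not to return it to the pool.
 */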
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;

			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/**
 * free_op_meta - free meta buffer back to the mempool.
 * @param mdata: pointer to the meta buffer (possibly tagged in bit 0).
 * @param cpt_meta_pool: mempool the buffer was taken from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}

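/*
 * Build a gather list from an mbuf chain, skipping start_offset bytes
 * from the start of the chain. Returns 0 on success and non-zero when
 * no data is left at the requested offset.
 */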
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

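/*
 * Build the I/O vector for in-place processing. A single-segment mbuf
 * is described directly in param->bufs[0] (SINGLE_BUF_INPLACE); when it
 * also has sufficient head and tailroom it qualifies for the 83XX
 * direct mode (SINGLE_BUF_HEADTAILROOM). Chained mbufs fall back to the
 * scatter-gather list in param->src_iov.
 */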
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
		    (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

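/*
 * Convert a symmetric crypto op into flexi-crypto request parameters.
 * Cipher and auth ranges are packed into two 64-bit words for the
 * microcode: d_offs holds (cipher_offset << 16) | auth_offset and
 * d_lens holds (cipher_len << 32) | auth_len; e.g. cipher offset 24
 * with auth offset 8 packs as d_offs = (24 << 16) | 8. For AEAD, AAD
 * that immediately precedes the data in the mbuf is folded into these
 * offsets and lengths instead of being passed as a separate AAD buffer.
 */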
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct cpt_ctx *ctx = SESS_PRIV(sess_misc);
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}

	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!ctx->dec_auth && !ctx->auth_enc &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			     mc_hash_off !=
			     (uint8_t *)sym_op->auth.digest.data))) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (!ctx->dec_auth &&
	    unlikely(sess_misc->is_null ||
		     sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

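/*
 * Compare the microcode-generated MAC with the expected digest and set
 * the op status. The reference MAC comes from the digest pointer when
 * provided, otherwise from just past the auth range in m_src.
 */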
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}

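/*
 * A KASUMI F9 input ends with the direction bit, a single '1' padding
 * bit and zero fill. Scanning back from the end of the buffer, the
 * least significant set bit of the last non-zero byte is the padding
 * bit; the bit just before it in stream order is the direction, and
 * everything ahead of that is message. E.g. a final byte of 0x01 gives
 * pos = 0, so the direction is bit 1 and that byte still holds six
 * message bits.
 */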
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	int32_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}

/*
 * This handles all auth-only requests except AES-GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */