/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

/*
 * This file defines functions that are interfaces to the microcode spec.
 */
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
	int i, base;

	for (i = 0; i < 4; i++) {
		base = 4 * i;
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
				(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
	}
}
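/*
 * Worked example (illustrative only): for ck[] = { 0x01, 0x02, 0x03, 0x04,
 * ... }, iteration i = 0 packs ck[0..3] into keyx[3] as 0x01020304 and then
 * byte-swaps it, so the microcode receives the 128-bit key as four 32-bit
 * big-endian words in reverse word order.
 */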
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
	uint16_t mac_len = auth->digest_length;
	int ret;

	switch (auth->algo) {
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		ret = 0;
		break;
	default:
		ret = -1;
	}

	return ret;
}
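/*
 * Usage sketch (illustrative only; the real callers are the session-setup
 * helpers further below): validate the requested digest length before
 * programming the context.
 *
 *	if (unlikely(cpt_mac_len_verify(&xform->auth)))
 *		return -1;	// digest_length not valid for this algo
 */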
static __rte_always_inline void
cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
		   uint8_t *salt)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
{
	switch (key_len) {
	case 16:
	case 24:
	case 32:
		return 0;
	default:
		return -1;
	}
}
static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
	int fc_type = 0;

	switch (type) {
	case PASSTHROUGH:
		fc_type = FC_GEN;
		break;
	case DES3_CBC:
	case DES3_ECB:
		fc_type = FC_GEN;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
	case AES_GCM:
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case AES_XTS:
		key_len = key_len / 2;
		if (unlikely(key_len == 24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
			return -1;
		}
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case ZUC_EEA3:
	case SNOW3G_UEA2:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = ZUC_SNOW3G;
		break;
	case KASUMI_F8_CBC:
	case KASUMI_F8_ECB:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = KASUMI;
		break;
	default:
		return -1;
	}

	ctx->fc_type = fc_type;
	return 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
	cpt_ctx->enc_cipher = 0;
	fctx->enc.enc_cipher = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
{
	mc_aes_type_t aes_key_type = 0;

	switch (key_len) {
	case 16:
		aes_key_type = AES_128_BIT;
		break;
	case 24:
		aes_key_type = AES_192_BIT;
		break;
	case 32:
		aes_key_type = AES_256_BIT;
		break;
	default:
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
		return;
	}
	fctx->enc.aes_key = aes_key_type;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
		uint16_t key_len)
{
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
	uint32_t keyx[4];

	cpt_ctx->snow3g = 1;
	gen_key_snow3g(key, keyx);
	memcpy(zs_ctx->ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
		uint16_t key_len)
{
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;

	cpt_ctx->snow3g = 0;
	memcpy(zs_ctx->ci_key, key, key_len);
	memcpy(zs_ctx->zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
		uint16_t key_len)
{
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	cpt_ctx->k_ecb = 1;
	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
		uint16_t key_len)
{
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	memcpy(k_ctx->ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
		    const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	int ret;

	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
	if (unlikely(ret))
		return -1;

	if (cpt_ctx->fc_type == FC_GEN) {
		/*
		 * We need to always say IV is from DPTR as user can
		 * sometimes override IV per operation.
		 */
		fctx->enc.iv_source = CPT_FROM_DPTR;

		if (cpt_ctx->auth_key_len > 64)
			return -1;
	}

	switch (type) {
	case PASSTHROUGH:
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		goto success;
	case DES3_CBC:
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
		 */
		if (key_len == 8) {
			/* Skipping the first 8B as it will be copied
			 * in the regular code flow
			 */
			memcpy(fctx->enc.encr_key+key_len, key, key_len);
			memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
		}
		break;
	case DES3_ECB:
		/* For DES3_ECB IV need to be from CTX. */
		fctx->enc.iv_source = CPT_FROM_CTX;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_GCM:
		/* Even though iv source is from dptr,
		 * aes_gcm salt is taken from ctx
		 */
		if (salt) {
			memcpy(fctx->enc.encr_iv, salt, 4);
			/* Assuming it was just salt update
			 * and nothing else
			 */
			if (!key)
				goto success;
		}
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_XTS:
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		break;
	case SNOW3G_UEA2:
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		goto success;
	case ZUC_EEA3:
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_ECB:
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_CBC:
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
		goto success;
	default:
		return -1;
	}

	/* Only for FC_GEN case */

	/* For GMAC auth, cipher must be NULL */
	if (cpt_ctx->hash_type != GMAC_TYPE)
		fctx->enc.enc_cipher = type;

	memcpy(fctx->enc.encr_key, key, key_len);

success:
	cpt_ctx->enc_cipher = type;

	return 0;
}
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     uint32_t i,
	     phys_addr_t dma_addr,
	     uint32_t size)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(size);
	to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
	return ++i;
}
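/*
 * Layout note (restating what fill_sg_comp() writes): each sg_comp_t packs
 * four entries, so entry i lands in component i >> 2, slot i % 4. The four
 * 16-bit lengths share the first 8 bytes and are followed by the four
 * 64-bit pointers, all stored big-endian as the microcode expects:
 *
 *	| len[0] | len[1] | len[2] | len[3] |   <- u.s.len, BE16 each
 *	|            ptr[0], BE64           |
 *	|            ptr[1], BE64           | ...
 */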
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
		      uint32_t i,
		      buf_ptr_t *from)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
	to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
	return ++i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
			  uint32_t i,
			  buf_ptr_t *from,
			  uint32_t *psize)
{
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
	*psize -= e_len;
	return ++i;
}
/*
 * This fills the MC expected SGIO list
 * from IOV given by user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      uint32_t i,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;

	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t dma_addr = from->bufs[j].dma_addr;
		uint32_t buf_sz = from->bufs[j].size;
		sg_comp_t *to = &list[i >> 2];
		phys_addr_t e_dma_addr;
		uint32_t e_len;

		if (unlikely(from_offset)) {
			if (from_offset >= buf_sz) {
				from_offset -= buf_sz;
				continue;
			}
			e_dma_addr = dma_addr + from_offset;
			e_len = (size > (buf_sz - from_offset)) ?
				(buf_sz - from_offset) : size;
			from_offset = 0;
		} else {
			e_dma_addr = dma_addr;
			e_len = (size > buf_sz) ? buf_sz : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

			if (!extra_offset) {
				i--;
			} else {
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] =
					rte_cpu_to_be_64(extra_buf->dma_addr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
				size -= next_len;
			}

			extra_len = 0;
		} else {
			size -= e_len;
		}
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
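/*
 * Example (illustrative numbers): with a single 1000B source buffer,
 * *psize = 1000, an extra_buf of 16B and extra_offset = 200, the loop above
 * emits three components: source bytes 0..199, the 16B extra buffer, then
 * source bytes 200..999. This is how a digest or AAD pointer is spliced
 * into the gather list at an arbitrary offset without copying payload data.
 */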
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    uint64_t d_lens,
		    digest_params_t *params,
		    void *op,
		    void **prep_req)
{
	struct cpt_request_info *req;
	uint32_t size, i;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	buf_ptr_t *meta_p;
	struct cpt_ctx *ctx;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;

	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;

	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);

	/* GP op header */
	vq_cmd_w0.s.opcode.minor = 0;
	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
	if (ctx->hmac) {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
	} else {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;
	}

	/* Null auth only case enters the if */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		vq_cmd_w0.s.opcode.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;
	}

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input gather list
	 */
	i = 0;

	if (ctx->hmac) {
		uint64_t k_dma = ctx->auth_key_iova;
		/* Key */
		i = fill_sg_comp(gather_comp, i, k_dma,
				 RTE_ALIGN_CEIL(key_len, 8));
	}

	/* input data */
	size = data_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
					  0, &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
					 " by %dB", size);
			return;
		}
	} else {
		/*
		 * Looks like we need to support zero data
		 * gather ptr in case of hash & hmac
		 */
		i++;
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Gather list
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");
			return;
		}

		size = mac_len;
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
	} else {
		size = mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
				       " %dB", size);
			return;
		}
	}

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr;
	uint64_t c_dma;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support AAD and auth data
		 * provided separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_ENCRYPT;
	vq_cmd_w0.s.opcode.minor |= (cpt_ctx->auth_enc <<
				     CPT_HMAC_FIRST_BIT_POS);

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
					encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
					encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	if (cpt_ctx->auth_enc != 0)
		outputlen = enc_dlen;

	/* GP op header */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * On 83XX the IV & offset control word cannot be carried in the
	 * instruction and must sit in the data buffer, so direct-mode
	 * processing is done only when head room is available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
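		/*
		 * Layout note (matches the encoding above): the 8-byte
		 * offset control word carries
		 *	bits 31..16	encr_offset
		 *	bits 15..8	iv_offset
		 *	bits  7..0	auth_offset
		 * stored big-endian, which is why the sanity check further
		 * below rejects encr_offset >= 2^16 and iv/auth offsets
		 * >= 2^8.
		 */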
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				i = fill_sg_comp_from_iov(gather_comp, i,
							  fc_params->src_iov,
							  0, &size,
							  aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	if (unlikely((encr_offset >> 16) ||
		     (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		CPT_LOG_DP_ERR("Offset not supported");
		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
		CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
		CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
		return;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	void *c_vaddr;
	uint64_t c_dma;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support AAD and auth data
		 * provided separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Decryption */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
	vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_DECRYPT;
	vq_cmd_w0.s.opcode.minor |= (cpt_ctx->dec_auth <<
				     CPT_HMAC_FIRST_BIT_POS);

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	if (cpt_ctx->dec_auth != 0)
		outputlen = inputlen = enc_dlen;

	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * On 83XX the IV & offset control word cannot be carried in the
	 * instruction and must sit in the data buffer, so direct-mode
	 * processing is done only when head room is available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
					outputlen - iv_len);
		/* since this is decryption,
		 * don't touch the content of
		 * alternate ccode space as it contains
		 * hash.
		 */

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		if (unlikely((encr_offset >> 16) ||
			     (iv_offset >> 8) ||
			     (auth_offset >> 8))) {
			CPT_LOG_DP_ERR("Offset not supported");
			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
			CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
			return;
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

	} else {
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i = 0;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		if (unlikely((encr_offset >> 16) ||
			     (iv_offset >> 8) ||
			     (auth_offset >> 8))) {
			CPT_LOG_DP_ERR("Offset not supported");
			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
			CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
			return;
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					i = fill_sg_comp_from_iov(gather_comp,
							i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						CPT_LOG_DP_ERR("Bad input args");
						return;
					}

					i = fill_sg_comp_from_iov(
							gather_comp, i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* Add iv */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					CPT_LOG_DP_ERR("Bad input args");
					return;
				}

				i = fill_sg_comp_from_iov(scatter_comp, i,
							  fc_params->dst_iov, 0,
							  &size, aad_buf,
							  aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint8_t snow3g, j;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Opcode */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));

	if (flags == 0x1) {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = AUTH_DLEN(d_lens);

		/* EIA3 or UIA2 */
		auth_offset = AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	} else {
		/* EEA3 or UEA2 */
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ENCR_DLEN(d_lens);

		encr_offset = ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	}

	if (unlikely((encr_offset >> 16) ||
		     (auth_offset >> 8))) {
		CPT_LOG_DP_ERR("Offset not supported");
		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
		CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
		return;
	}

	/* IV */
	iv_s = (flags == 0x1) ? params->auth_iv_buf :
		params->iv_buf;

	if (snow3g) {
		/*
		 * DPDK provides the IV as IV3 IV2 IV1 IV0, big endian;
		 * the microcode expects it as IV0 IV1 IV2 IV3
		 */

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
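		/*
		 * Example (illustrative only): iv_s[] = { IV3, IV2, IV1, IV0 }
		 * as supplied by DPDK becomes iv[] = { IV0, IV1, IV2, IV3 },
		 * the word order the microcode expects for SNOW3G.
		 */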
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * On 83XX the IV & offset control word cannot be carried in the
	 * instruction and must sit in the data buffer, so direct-mode
	 * processing is done only when head room is available.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		*offset_vaddr = offset_ctrl;
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
		if (flags == 0x0) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);
		}

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;
	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Opcode */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
				    (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* IV */
	iv_s = params->iv_buf;
	if (snow3g) {
		/*
		 * DPDK provides the IV as IV3 IV2 IV1 IV0, big endian;
		 * the microcode expects it as IV0 IV1 IV2 IV3
		 */

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;

	/*
	 * On 83XX the IV & offset control word cannot be carried in the
	 * instruction and must sit in the data buffer, so direct-mode
	 * processing is done only when head room is available.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there is 24 bytes head room and
		 * 8 bytes tail room available, so that we get to do
		 * DIRECT MODE with limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		/* DPTR */
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	if (unlikely((encr_offset >> 16))) {
		CPT_LOG_DP_ERR("Offset not supported");
		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
		return;
	}

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint8_t i = 0;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Opcode */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/* consider iv len */
	encr_offset += iv_len;
	auth_offset += iv_len;

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		if (unlikely((encr_offset >> 16))) {
			CPT_LOG_DP_ERR("Offset not supported");
			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
			return;
		}
	} else {
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		if (unlikely((auth_offset >> 8))) {
			CPT_LOG_DP_ERR("Offset not supported");
			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
			return;
		}
	}

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
	if (flags == 0x0) {
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Opcode */
	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
				    (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	if (unlikely((encr_offset >> 16))) {
		CPT_LOG_DP_ERR("Offset not supported");
		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
		return;
	}

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov,
					  0, &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,
			 iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;

	req->op = op;

	*prep_req = req;
}
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     uint64_t d_offs,
		     uint64_t d_lens,
		     fc_params_t *fc_params,
		     void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
	}

	/*
	 * For AUTH_ONLY case,
	 * MC only supports digest generation and verification
	 * should be done in software by memcmp()
	 */

	return prep_req;
}
static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	/* Common api for rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
				    &prep_req);
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
	}

	return prep_req;
}
static __rte_always_inline int
cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
		    const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		uint32_t keyx[4];

		if (key_len != 16)
			return -1;
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
			return -1;
		/* For ZUC/SNOW3G/Kasumi */
		switch (type) {
		case SNOW3G_UIA2:
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(zs_ctx->ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case ZUC_EIA3:
			cpt_ctx->snow3g = 0;
			memcpy(zs_ctx->ci_key, key, key_len);
			memcpy(zs_ctx->zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_ECB:
			/* Kasumi ECB mode */
			cpt_ctx->k_ecb = 1;
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_CBC:
			memcpy(k_ctx->ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		default:
			return -1;
		}
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
		return 0;
	}

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	}

	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
		return -1;

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		fctx->enc.enc_cipher = 0;

	fctx->enc.hash_type = cpt_ctx->hash_type = type;
	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

	if (key_len) {
		cpt_ctx->hmac = 1;

		cpt_ctx->auth_key = rte_zmalloc(NULL, key_len, 8);
		if (cpt_ctx->auth_key == NULL)
			return -1;

		cpt_ctx->auth_key_iova = rte_mem_virt2iova(cpt_ctx->auth_key);
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));

		memcpy(fctx->hmac.ipad, key, key_len);
		memcpy(fctx->hmac.opad, key, key_len);
		fctx->enc.auth_input_type = 1;
	}
	return 0;
}
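/*
 * Note (hedged reading of the code above): the raw HMAC key is written into
 * both ipad and opad and enc.auth_input_type is set, which appears to tell
 * the microcode to derive the actual inner/outer pads (key XOR 0x36 /
 * key XOR 0x5c) itself; no pad precomputation is done in software here.
 */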
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	aead_form = &xform->aead;
	void *ctx = SESS_PRIV(sess);

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	} else {
		CPT_LOG_DP_ERR("Unknown aead operation");
		return -1;
	}
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       aead_form->algo);
		return -1;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = CHACHA20;
		auth_type = POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       aead_form->algo);
		return -1;
	}
	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
		return -1;
	}
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			aead_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			aead_form->digest_length)))
		return -1;

	return 0;
}
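/*
 * Usage sketch (illustrative field values, not taken from this file): an
 * AES-GCM session reaching fill_sess_aead() would typically carry
 *
 *	xform->aead.algo          = RTE_CRYPTO_AEAD_AES_GCM;
 *	xform->aead.key.length    = 16;		// or 24/32
 *	xform->aead.digest_length = 16;
 *	xform->aead.iv.length     = 12;
 *
 * and ends up programming both the cipher key (cpt_fc_ciph_set_key) and
 * the GMAC digest length (cpt_fc_auth_set_key) on the same context.
 */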
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	struct cpt_ctx *ctx = SESS_PRIV(sess);
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		if (xform->next != NULL &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			/* Perform decryption followed by auth verify */
			ctx->dec_auth = 1;
		}
	} else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
			c_form->key.data, c_form->key.length, NULL)))
		return -1;

	return 0;
}
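/*
 * Note: the zsk_flag recorded above (K_F8 for Kasumi F8, ZS_EA for
 * SNOW3G/ZUC encryption) is what later tells fill_fc_params() to fetch the
 * auth IV and back off from in-place processing, since offsets for the
 * wireless algorithms are bit-granular; ZS_IA and K_F9 in fill_sess_auth()
 * below play the same role for the auth-only path.
 */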
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct cpt_ctx *ctx = SESS_PRIV(sess);
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		ctx->auth_enc = 1;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
			a_form->key.data, a_form->key.length,
			a_form->digest_length)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}
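/*
 * Example (illustrative only): AES-GMAC arrives as an auth-only xform, so
 * fill_sess_gmac() programs the AES key through the cipher path (AES_GCM)
 * and GMAC_TYPE through the hash path with no auth key. A sketch of such
 * an xform, with assumed field values:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_AES_GMAC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = iv_off, .length = 12 },
 *			.digest_length = 16,
 *		},
 *	};
 */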
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
/**
 * free_op_meta - free meta buffer back to the mempool.
 * @param mdata: pointer returned by alloc_op_meta().
 * @param cpt_meta_pool: pool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
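/*
 * Meta buffer ownership protocol: alloc_op_meta() returns either a mempool
 * object (bit 0 clear) or a pointer carved out of the source mbuf's
 * tailroom, tagged by setting bit 0. free_op_meta() keys off that bit, so
 * tailroom-backed buffers are silently not returned to the pool. Consumers
 * must mask the bit before dereferencing, as fill_fc_params() does below:
 *
 *	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
 */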
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
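/*
 * Each mbuf segment becomes one SG entry, so callers size the backing
 * store for the worst case (the char src[SRC_IOV_SIZE] arrays used by
 * fill_fc_params() and fill_digest_params()). A non-zero return means the
 * requested offset landed on an empty segment and the IOV cannot be built.
 */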
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct cpt_ctx *ctx = SESS_PRIV(sess_misc);
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
				uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}
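		/*
		 * Worked example (illustrative values): with aad_len = 16,
		 * aead.data.offset = 16 and aead.data.length = 64, the
		 * contiguous-AAD branch above packs
		 *
		 *	d_offs = (16 - 16) | (16 << 16)    = 0x100000
		 *	d_lens = (64 + 16) | (64ULL << 32) = 0x0000004000000050
		 *
		 * i.e. the low halves carry the auth view (start 0, 80 bytes,
		 * covering AAD + data) and the high halves carry the cipher
		 * view (start 16, 64 bytes).
		 */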
		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!ctx->dec_auth && !ctx->auth_enc &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (!ctx->dec_auth &&
	    unlikely(sess_misc->is_null ||
		     sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov,
						 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
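/*
 * Layout of the meta buffer set up above (mirrored by
 * fill_digest_params() below):
 *
 *	op[0]	mdata pointer, possibly tagged (returned via *mdata_ptr)
 *	op[1]	the rte_crypto_op, recovered at completion time
 *	op[2]	vaddr of the HW-generated MAC when auth verify is
 *		pending, else 0
 *	op[3]	mac_len for that verify, else 0
 *	...	the remainder serves as scratch for the prepared instruction
 */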
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
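/*
 * Worked example (values assumed for illustration): a KASUMI f9 input ends
 * with ...MESSAGE || DIRECTION (1 bit) || '1' || '0'-padding, so the scan
 * above walks back to the last non-zero byte and locates the terminating
 * '1' with rte_bsf32() (index of the least-significant set bit). For a
 * final byte of 0x40 (0100 0000b):
 *
 *	pos       = rte_bsf32(0x40) = 6
 *	direction = (0x40 >> 7) & 0x1 = 0
 *	length    = counter_num_bytes * 8 + (8 - (pos + 2))
 *	          = counter_num_bytes * 8 bits of message
 *
 * The pos == 7 special case means the terminator sits on a byte boundary,
 * so the direction bit is the LSB of the previous byte.
 */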
/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
#endif /* _CPT_UCODE_H_ */