/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

/*
 * This file defines functions that are interfaces to the microcode spec.
 */
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
	int i, base;

	for (i = 0; i < 4; i++) {
		base = 4 * i;
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
	}
}
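/*
 * Usage sketch (hypothetical, for illustration only): derive the SNOW3G
 * key schedule for a sample 16B confidentiality key. The key bytes below
 * are placeholders, not values from the microcode spec.
 */
static __rte_unused void
example_gen_key_snow3g(void)
{
	const uint8_t ck[16] = { 0x2b, 0xd6, 0x45, 0x9f, 0x82, 0xc5, 0xb3,
				 0x00, 0x95, 0x2c, 0x49, 0x10, 0x48, 0x81,
				 0xff, 0x48 };
	uint32_t keyx[4];

	/* keyx[] receives CK as four byte-swapped words in reverse order */
	gen_key_snow3g(ck, keyx);
}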
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
		   uint8_t *salt)
{
	struct cpt_ctx *cpt_ctx = ctx;

	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
	int fc_type = 0;

	switch (type) {
	case PASSTHROUGH:
	case DES3_CBC:
	case DES3_ECB:
		fc_type = FC_GEN;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
	case AES_GCM:
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case AES_XTS:
		key_len = key_len / 2;
		if (unlikely(key_len == CPT_BYTE_24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
			return -1;
		}
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
			return -1;
		fc_type = FC_GEN;
		break;
	case ZUC_EEA3:
	case SNOW3G_UEA2:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = ZUC_SNOW3G;
		break;
	case KASUMI_F8_CBC:
	case KASUMI_F8_ECB:
		if (unlikely(key_len != 16))
			return -1;
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
			return -1;
		fc_type = KASUMI;
		break;
	default:
		return -1;
	}

	ctx->fc_type = fc_type;
	return 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
	cpt_ctx->enc_cipher = 0;
	fctx->enc.enc_cipher = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
{
	mc_aes_type_t aes_key_type = 0;

	switch (key_len) {
	case CPT_BYTE_16:
		aes_key_type = AES_128_BIT;
		break;
	case CPT_BYTE_24:
		aes_key_type = AES_192_BIT;
		break;
	case CPT_BYTE_32:
		aes_key_type = AES_256_BIT;
		break;
	default:
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
		return;
	}
	fctx->enc.aes_key = aes_key_type;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				uint16_t key_len)
{
	uint32_t keyx[4];

	cpt_ctx->snow3g = 1;
	gen_key_snow3g(key, keyx);
	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
			     uint16_t key_len)
{
	cpt_ctx->snow3g = 0;
	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				  uint16_t key_len)
{
	cpt_ctx->k_ecb = 1;
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
				  uint16_t key_len)
{
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
		    uint16_t key_len, uint8_t *salt)
{
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	int ret;

	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
	if (unlikely(ret))
		return -1;

	if (cpt_ctx->fc_type == FC_GEN) {
		/*
		 * We need to always say IV is from DPTR as the user can
		 * sometimes override the IV per operation.
		 */
		fctx->enc.iv_source = CPT_FROM_DPTR;

		if (cpt_ctx->auth_key_len > 64)
			return -1;
	}

	switch (type) {
	case PASSTHROUGH:
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		goto success;
	case DES3_CBC:
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g., if the original key is "0x0a 0x0b", then the new
		 * key is "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
		 */
		if (key_len == 8) {
			/* Skipping the first 8B as it will be copied
			 * in the regular code flow
			 */
			memcpy(fctx->enc.encr_key+key_len, key, key_len);
			memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
		}
		break;
	case DES3_ECB:
		/* For DES3_ECB the IV needs to come from CTX. */
		fctx->enc.iv_source = CPT_FROM_CTX;
		break;
	case AES_CBC:
	case AES_ECB:
	case AES_CFB:
	case AES_CTR:
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_GCM:
		/* Even though the IV source is DPTR,
		 * the AES-GCM salt is taken from CTX.
		 */
		if (salt) {
			memcpy(fctx->enc.encr_iv, salt, 4);
			/* Assuming it was just a salt update
			 * and nothing else
			 */
			if (!key)
				goto success;
		}
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		break;
	case AES_XTS:
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		break;
	case SNOW3G_UEA2:
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		goto success;
	case ZUC_EEA3:
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_ECB:
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		goto success;
	case KASUMI_F8_CBC:
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
		goto success;
	default:
		return -1;
	}

	/* Only for the FC_GEN case */

	/* For GMAC auth, cipher must be NULL */
	if (cpt_ctx->hash_type != GMAC_TYPE)
		fctx->enc.enc_cipher = type;

	memcpy(fctx->enc.encr_key, key, key_len);

success:
	cpt_ctx->enc_cipher = type;

	return 0;
}
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     uint32_t i,
	     phys_addr_t dma_addr,
	     uint32_t size)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(size);
	to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
	i++;
	return i;
}
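/*
 * Illustration (hypothetical): each sg_comp_t packs up to four
 * {len, ptr} pairs, so entry n lands in component n >> 2, slot n % 4,
 * with both fields stored big-endian for the microcode. The DMA
 * addresses below are placeholders.
 */
static __rte_unused void
example_fill_sg_comp(sg_comp_t *list)
{
	uint32_t idx = 0;

	idx = fill_sg_comp(list, idx, 0x10000000ULL, 64);  /* slot 0 */
	idx = fill_sg_comp(list, idx, 0x20000000ULL, 128); /* slot 1 */
	/* idx is now 2; entries 0-3 all share list[0] */
}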
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
		      uint32_t i,
		      buf_ptr_t *from)
{
	sg_comp_t *to = &list[i>>2];

	to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
	to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
	i++;
	return i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
			  uint32_t i,
			  buf_ptr_t *from,
			  uint32_t *psize)
{
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
	*psize -= e_len;
	i++;
	return i;
}
/*
 * This fills the MC expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      uint32_t i,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	buf_ptr_t *bufs = from->bufs;

	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		uint32_t e_len;
		sg_comp_t *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			from_offset = 0;
		} else {
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?
				bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at the given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

			if (!extra_offset) {
				i--;
			} else {
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] =
					rte_cpu_to_be_64(extra_buf->dma_addr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
				size -= next_len;
			}

			extra_len = 0;
		} else {
			size -= e_len;
		}

		if (extra_offset)
			extra_offset -= size;

		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
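/*
 * Usage sketch (hypothetical): gather SGIO entries from a caller-built
 * iov. The requested size is a placeholder; on return, size holds the
 * bytes that could not be mapped (0 when the iov was sufficient).
 */
static __rte_unused uint32_t
example_fill_sg_from_iov(sg_comp_t *list, iov_ptr_t *iov)
{
	uint32_t idx = 0;
	uint32_t size = 256;	/* bytes we want described by the SG list */

	idx = fill_sg_comp_from_iov(list, idx, iov, 0 /* from_offset */,
				    &size, NULL /* no extra buf */, 0);
	if (size)
		CPT_LOG_DP_ERR("iov too small, short by %u bytes", size);
	return idx;
}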
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    uint64_t d_lens,
		    digest_params_t *params,
		    void *op,
		    void **prep_req)
{
	struct cpt_request_info *req;
	uint32_t size, i;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	buf_ptr_t *meta_p;
	struct cpt_ctx *ctx;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;
	opcode_info_t opcode;

	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;

	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);

	/* GP op header */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
	if (ctx->hmac) {
		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
	} else {
		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;
	}

	/* Only the NULL-auth-only case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		opcode.s.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		opcode.s.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;
	}

	vq_cmd_w0.s.opcode = opcode.flags;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input gather list
	 */
	i = 0;

	if (ctx->hmac) {
		uint64_t k_dma = params->ctx_buf.dma_addr +
			offsetof(struct cpt_ctx, auth_key);
		/* Key */
		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
	}

	/* Input data */
	size = data_len;
	i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
				  0, &size, NULL, 0);
	if (unlikely(size)) {
		CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by %dB",
				 size);
	}

	/*
	 * Looks like we need to support zero data
	 * gather ptr in case of hash & hmac
	 */

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter list
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");
			return;
		}
		size = mac_len;
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
	} else {
		size = mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
				       " %dB", size);
			return;
		}
	}

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
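/*
 * Sketch (assumption): the meta-buffer carving that every prep routine
 * in this file repeats. COMPLETION_CODE_SIZE bytes stay in the app-data
 * cache line, then the 16B-aligned cpt_res_s_t follows. This helper is
 * hypothetical; the real functions inline the arithmetic.
 */
static __rte_unused void *
example_carve_meta(void *m_vaddr, uint64_t m_dma, uint64_t *c_dma)
{
	uint32_t size;
	void *c_vaddr;

	/* Skip the completion code kept with the app data */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;

	/* 16B align for the result word */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 16) - (uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	*c_dma = m_dma + size;

	return c_vaddr;
}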
static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr;
	uint64_t c_dma;
	opcode_info_t opcode;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	opcode.s.major = CPT_MAJOR_OP_FC;

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	/* GP op header */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * In 83XX, the IV & offset control word cannot be part of the
	 * instruction and must be placed in the data buffer. Check for
	 * headroom and only then do the direct mode processing.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we can use
		 * DIRECT MODE despite the limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				i = fill_sg_comp_from_iov(gather_comp, i,
							  fc_params->src_iov,
							  0, &size,
							  aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							scatter_comp,
							i,
							fc_params->bufs,
							&size);
				} else {
					i = fill_sg_comp_from_iov(scatter_comp,
							i,
							fc_params->dst_iov,
							0,
							&size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
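/*
 * Sketch (assumption): the 8B offset control word written above packs
 * the byte offsets as BE64(encr_offset << 16 | iv_offset << 8 |
 * auth_offset). A hypothetical helper mirroring that packing:
 */
static __rte_unused uint64_t
example_pack_offset_ctrl(uint32_t encr_offset, uint32_t iv_offset,
			 uint32_t auth_offset)
{
	return rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
}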
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  uint64_t d_offs,
		  uint64_t d_lens,
		  fc_params_t *fc_params,
		  void *op,
		  void **prep_req)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	opcode_info_t opcode;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr;
	uint64_t c_dma;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
		(uint8_t *)m_vaddr;

	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Decryption */
	opcode.s.major = CPT_MAJOR_OP_FC;

	if (hash_type == GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	/* GP op header */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * In 83XX, the IV & offset control word cannot be part of the
	 * instruction and must be placed in the data buffer. Check for
	 * headroom and only then do the direct mode processing.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we can use
		 * DIRECT MODE despite the limitation
		 */

		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* since this is decryption,
		 * don't touch the content of
		 * alternate ccode space as it contains
		 * hmac.
		 */

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
	} else {
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					i = fill_sg_comp_from_iov(gather_comp,
							i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
							gather_comp, i,
							fc_params->bufs,
							&size);
				} else {
					uint32_t aad_offset = aad_len ?
						passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						CPT_LOG_DP_ERR("Bad input args");
						return;
					}

					i = fill_sg_comp_from_iov(
							gather_comp, i,
							fc_params->src_iov,
							0, &size,
							aad_buf,
							aad_offset);
				}

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
						       size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* Add iv */
		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					CPT_LOG_DP_ERR("Bad input args");
					return;
				}

				i = fill_sg_comp_from_iov(scatter_comp, i,
							  fc_params->dst_iov, 0,
							  &size, aad_buf,
							  aad_offset);
			}

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint8_t snow3g, j;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));

	if (flags == 0x1) {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = AUTH_DLEN(d_lens);

		auth_offset = AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	} else {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ENCR_DLEN(d_lens);

		encr_offset = ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	}

	/* IV */
	iv_s = (flags == 0x1) ? params->auth_iv_buf :
		params->iv_buf;

	if (snow3g) {
		/*
		 * DPDK provides the IV as words IV3 IV2 IV1 IV0 in
		 * big-endian form; MC needs it as IV0 IV1 IV2 IV3
		 */

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;

	/*
	 * In 83XX, the IV & offset control word cannot be part of the
	 * instruction and must be placed in the data buffer. Check for
	 * headroom and only then do the direct mode processing.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we can use
		 * DIRECT MODE despite the limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		*offset_vaddr = offset_ctrl;
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
		if (flags == 0x1)
			iv_len = 0;

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);
		}

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer space,"
						       " size %d needed", size);
					return;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
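/*
 * Sketch (assumption): the SNOW3G/ZUC IV reordering done above. DPDK
 * hands the IV as words IV3..IV0 (big endian); the microcode expects
 * IV0..IV3, while ZUC IVs are copied as-is. Hypothetical helper
 * mirroring that logic:
 */
static __rte_unused void
example_reorder_zs_iv(const uint32_t *iv_s, uint32_t *iv_d, uint8_t snow3g)
{
	int j;

	if (snow3g) {
		/* reverse the four IV words for SNOW3G */
		for (j = 0; j < 4; j++)
			iv_d[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv_d[j] = iv_s[j];
	}
}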
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			uint64_t d_offs,
			uint64_t d_lens,
			fc_params_t *params,
			void *op,
			void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* IV */
	iv_s = params->iv_buf;
	if (snow3g) {
		/*
		 * DPDK provides the IV as words IV3 IV2 IV1 IV0 in
		 * big-endian form; MC needs it as IV0 IV1 IV2 IV3
		 */

		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;

	/*
	 * In 83XX, the IV & offset control word cannot be part of the
	 * instruction and must be placed in the data buffer. Check for
	 * headroom and only then do the direct mode processing.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and
		 * 8 bytes of tailroom are available, so we can use
		 * DIRECT MODE despite the limitation
		 */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;

		vq_cmd_w0.s.opcode = opcode.flags;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
						      + OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;
		offset_dma = m_dma;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = opcode.flags;

		/* DPTR has SG list */
		in_buffer = m_vaddr;
		dptr_dma = m_dma;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov,
						  0, &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;

		m_vaddr = (uint8_t *)m_vaddr + size;
		m_dma += size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	}

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	uint32_t i = 0;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;

	/* consider iv len */
	encr_offset += iv_len;
	auth_offset += iv_len;

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
	}

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
	if (flags == 0x0) {
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0,
						  &size, NULL, 0);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return;
			}
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    uint64_t d_lens,
		    fc_params_t *params,
		    void *op,
		    void **prep_req)
{
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	buf_ptr_t *buf_p;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;

	/*
	 * Save initial space that followed app data for completion code &
	 * alternate completion code to fall in same cache line as app data
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
		(uint8_t *)m_vaddr;

	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* Reserve memory for cpt request info */
	req = m_vaddr;

	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.u64 = 0;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;
	offset_dma = m_dma;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;
	dptr_dma = m_dma;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i,
					  params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,
			 iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
			return;
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;

	m_vaddr = (uint8_t *)m_vaddr + size;
	m_dma += size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* vq command w3 */
	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;

	*prep_req = req;
}
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     uint64_t d_offs,
		     uint64_t d_lens,
		     fc_params_t *fc_params,
		     void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
	}

	/*
	 * For the AUTH_ONLY case,
	 * MC only supports digest generation; verification
	 * should be done in software by memcmp()
	 */

	return prep_req;
}
static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)
{
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
				  &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
					&prep_req);
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
				    &prep_req);
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
	}

	return prep_req;
}
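/*
 * Dispatch sketch (hypothetical): both wrappers above select the prep
 * routine from ctx->fc_type. The flags and packed offsets/lengths below
 * are placeholders for values the caller normally derives per request.
 */
static __rte_unused void *
example_prep_encrypt(fc_params_t *fcp, void *op)
{
	uint64_t d_offs = 0;	/* encr/auth offsets packed per microcode */
	uint64_t d_lens = 0;	/* encr/auth lengths packed per microcode */

	return cpt_fc_enc_hmac_prep(0 /* flags */, d_offs, d_lens, fcp, op);
}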
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
{
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		uint32_t keyx[4];

		if (key_len != 16)
			return -1;
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
			return -1;
		/* For ZUC/SNOW3G/Kasumi */
		switch (type) {
		case SNOW3G_UIA2:
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case ZUC_EIA3:
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_ECB:
			/* Kasumi ECB mode */
			cpt_ctx->k_ecb = 1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_CBC:
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		default:
			return -1;
		}
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
		return 0;
	}

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	}

	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
		return -1;

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		fctx->enc.enc_cipher = 0;

	fctx->enc.hash_type = cpt_ctx->hash_type = type;
	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;

	if (key_len) {
		cpt_ctx->hmac = 1;
		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));

		memcpy(fctx->hmac.opad, key, key_len);
		fctx->enc.auth_input_type = 1;
	}
	return 0;
}
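/*
 * Usage sketch (hypothetical): configure SHA1-HMAC authentication on a
 * context. The key bytes and the 12B truncated digest length are
 * placeholders, not values required by the microcode.
 */
static __rte_unused int
example_auth_set_key(struct cpt_ctx *cpt_ctx)
{
	static const uint8_t key[20] = { 0 };

	/* 20B key, 12B truncated digest */
	return cpt_fc_auth_set_key(cpt_ctx, SHA1_TYPE, key, sizeof(key), 12);
}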
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	void *ctx = SESS_PRIV(sess);

	aead_form = &xform->aead;

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
	    aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
		   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	} else {
		CPT_LOG_DP_ERR("Unknown aead operation");
		return -1;
	}
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       aead_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       aead_form->algo);
		return -1;
	}
	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
		return -1;
	}

	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			aead_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			aead_form->digest_length)))
		return -1;

	return 0;
}
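/*
 * Example (hypothetical): a minimal AES-GCM xform as fill_sess_aead
 * consumes it. The key, IV, digest and AAD sizes below are placeholders
 * chosen for illustration only.
 */
static __rte_unused int
example_fill_sess_aead(struct cpt_sess_misc *sess)
{
	static uint8_t key[16];
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform.aead.key.data = key;
	xform.aead.key.length = sizeof(key);
	xform.aead.iv.offset = 0;
	xform.aead.iv.length = 12;
	xform.aead.digest_length = 16;
	xform.aead.aad_length = 8;

	return fill_sess_aead(&xform, sess);
}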
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
	else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
			c_form->key.data, c_form->key.length, NULL)))
		return -1;

	return 0;
}

static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
			a_form->key.data, a_form->key.length,
			a_form->digest_length)))
		return -1;

	return 0;
}
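
/*
 * Note with sketch (illustration only): the auth IV fields are captured only
 * when zsk_flag is set, i.e. for the wireless MACs (SNOW3G UIA2, ZUC EIA3,
 * KASUMI F9); plain SHA/MD5 (HMAC) sessions ignore a_form->iv. A minimal
 * SNOW3G UIA2 xform, with IV_OFFSET a hypothetical offset into the op:
 *
 *	struct rte_crypto_sym_xform xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *			.digest_length = 4,
 *		},
 *	};
 */
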
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}

static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/**
 * free_op_meta - free metabuf to mempool.
 * @param mdata: pointer to the metabuf.
 * @param cpt_meta_pool: mempool to return it to.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
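
/*
 * Note on the metadata tag bit (illustration only): alloc_op_meta() hands out
 * either mempool memory or mbuf tailroom. Tailroom-backed pointers are tagged
 * by setting bit 0 (the buffer is at least 8B aligned, so the bit is spare),
 * and free_op_meta() keys off that bit to skip rte_mempool_put(). Callers must
 * mask the bit off before dereferencing, as fill_fc_params() below does:
 *
 *	void *mdata = alloc_op_meta(m_src, &buf, len, pool);
 *	uintptr_t *op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
 *	...
 *	free_op_meta(mdata, pool);  // no-op for tailroom-backed metadata
 */
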
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
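
/*
 * Worked example (illustration only): for a 3-segment chain of 64, 128 and
 * 32 bytes with start_offset = 80, the loop above skips the whole first
 * segment (80 >= 64, remainder 16) and emits:
 *
 *	iovec->bufs[0] = { seg1_va + 16, seg1_iova + 16, 112 }
 *	iovec->bufs[1] = { seg2_va,      seg2_iova,       32 }
 *	iovec->buf_cnt = 2;
 */
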
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* 1st seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
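
/*
 * Note (illustration only): the single-segment fast path above degrades
 * gracefully. Any one-segment mbuf gets SINGLE_BUF_INPLACE; it additionally
 * gets SINGLE_BUF_HEADTAILROOM, the 83XX Direct mode prerequisite, only with
 * >= 24B headroom and >= 8B tailroom. fill_fc_params() below uses that flag
 * to decide whether metadata may live in the mbuf:
 *
 *	uint32_t flags = 0;
 *	prepare_iov_from_pkt_inplace(m, &fc_params, &flags);
 *	if (flags & SINGLE_BUF_HEADTAILROOM)
 *		mdata = alloc_op_meta(m, &fc_params.meta_buf, lb_mlen, pool);
 *	else
 *		mdata = alloc_op_meta(NULL, &fc_params.meta_buf, sg_mlen, pool);
 */
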
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
			    mc_hash_off !=
			    (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
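
/*
 * Worked example (illustration only) of the d_offs/d_lens packing used above
 * on the non-GCM path: the cipher range rides in the upper bits, the auth
 * range in the lower bits of each 64-bit word:
 *
 *	d_offs = (cipher.offset << 16) | auth.offset;
 *	d_lens = (cipher.length << 32) | auth.length;
 *
 * E.g. cipher offset 24/length 100, auth offset 0/length 124 gives
 * d_offs = 0x180000 and d_lens = 0x640000007c. On the GCM path, contiguous
 * AAD is instead folded in as (d_offs - aad_len) | (d_offs << 16) and
 * (d_lens + aad_len) | (d_lens << 32).
 */
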
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
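
/*
 * Usage sketch (illustration only): on the verify path fill_digest_params()
 * below parks the microcode-generated MAC in the metadata and records it in
 * op[2]/op[3]; a dequeue/completion handler would then do roughly:
 *
 *	if (op[2])
 *		compl_auth_verify(cop, (uint8_t *)op[2], op[3]);
 *
 * comparing the generated MAC with the digest supplied in the op. The exact
 * call site lives in the PMD dequeue path, not in this file.
 */
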
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
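
/*
 * Worked example (illustration only): a KASUMI F9 input ends with a direction
 * bit, a '1' stop bit, then zero padding, so the scan above walks back over
 * 0x00 bytes and finds the stop bit at pos = rte_bsf32() of the first nonzero
 * byte. If src[5] == 0x40 (binary 01000000) and everything after it is 0x00:
 *
 *	pos = 6, direction = (0x40 >> 7) & 0x1 = 0,
 *	length_in_bits = 5 * 8 + (8 - (6 + 2)) = 40
 *
 * If the stop bit is the MSB itself (byte 0x80, pos == 7), the direction bit
 * is the LSB of the previous byte and length_in_bits = index * 8 - 1.
 */
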
/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest, let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /*_CPT_UCODE_H_ */