1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines the functions that form the interface to the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
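/*
 * Build the SNOW 3G key words used by the microcode context from the
 * 16-byte cipher key (CK): each 32-bit word is assembled from four
 * consecutive CK bytes, converted to big-endian, and the word order is
 * reversed relative to CK (the first four CK bytes end up in keyx[3]).
 */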
25 static __rte_always_inline void
26 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
30 for (i = 0; i < 4; i++) {
32 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
33 (ck[base + 2] << 8) | (ck[base + 3]);
34 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
38 static __rte_always_inline void
39 cpt_fc_salt_update(void *ctx,
42 struct cpt_ctx *cpt_ctx = ctx;
43 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
46 static __rte_always_inline int
47 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
59 static __rte_always_inline int
60 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
76 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
84 key_len = key_len / 2;
85 if (unlikely(key_len == CPT_BYTE_24)) {
86 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
89 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
95 if (unlikely(key_len != 16))
97 /* No support for AEAD yet */
98 if (unlikely(ctx->hash_type))
100 fc_type = ZUC_SNOW3G;
104 if (unlikely(key_len != 16))
106 /* No support for AEAD yet */
107 if (unlikely(ctx->hash_type))
115 ctx->fc_type = fc_type;
119 static __rte_always_inline void
120 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
122 cpt_ctx->enc_cipher = 0;
123 fctx->enc.enc_cipher = 0;
126 static __rte_always_inline void
127 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
129 mc_aes_type_t aes_key_type = 0;
132 aes_key_type = AES_128_BIT;
135 aes_key_type = AES_192_BIT;
138 aes_key_type = AES_256_BIT;
141 /* This should not happen */
142 CPT_LOG_DP_ERR("Invalid AES key len");
145 fctx->enc.aes_key = aes_key_type;
148 static __rte_always_inline void
149 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
154 gen_key_snow3g(key, keyx);
155 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
156 cpt_ctx->zsk_flags = 0;
159 static __rte_always_inline void
160 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
164 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
165 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
166 cpt_ctx->zsk_flags = 0;
169 static __rte_always_inline void
170 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
174 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
175 cpt_ctx->zsk_flags = 0;
178 static __rte_always_inline void
179 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
182 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
183 cpt_ctx->zsk_flags = 0;
186 static __rte_always_inline int
187 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
188 uint16_t key_len, uint8_t *salt)
190 struct cpt_ctx *cpt_ctx = ctx;
191 mc_fc_context_t *fctx = &cpt_ctx->fctx;
194 ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
198 if (cpt_ctx->fc_type == FC_GEN) {
200 * We always need to say the IV is from DPTR, as the user can
201 * sometimes override the IV per operation.
203 fctx->enc.iv_source = CPT_FROM_DPTR;
205 if (cpt_ctx->auth_key_len > 64)
211 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
214 /* CPT performs DES using 3DES with the 8B DES-key
215 * replicated 2 more times to match the 24B 3DES-key.
216 * E.g. if the original key is "0x0a 0x0b", then the new key is
217 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
220 /* Skipping the first 8B as it will be copied
221 * in the regular code flow
223 memcpy(fctx->enc.encr_key+key_len, key, key_len);
224 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
228 /* For DES3_ECB, the IV needs to come from CTX. */
229 fctx->enc.iv_source = CPT_FROM_CTX;
236 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
239 /* Even though the IV source is DPTR,
240 * the AES-GCM salt is taken from CTX
243 memcpy(fctx->enc.encr_iv, salt, 4);
244 /* Assuming it was just salt update
250 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
253 key_len = key_len / 2;
254 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
256 /* Copy key2 for XTS into ipad */
257 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
258 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
261 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
264 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
267 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
270 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
276 /* Only for FC_GEN case */
278 /* For GMAC auth, cipher must be NULL */
279 if (cpt_ctx->hash_type != GMAC_TYPE)
280 fctx->enc.enc_cipher = type;
282 memcpy(fctx->enc.encr_key, key, key_len);
285 cpt_ctx->enc_cipher = type;
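/*
 * SG helpers: each sg_comp_t packs up to four (length, pointer) pairs,
 * so logical entry i lands in list[i >> 2], slot i % 4 (e.g. entry 5
 * goes into list[1], slot 1). Lengths and pointers are stored
 * big-endian for the microcode.
 */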
290 static __rte_always_inline uint32_t
291 fill_sg_comp(sg_comp_t *list,
293 phys_addr_t dma_addr,
296 sg_comp_t *to = &list[i>>2];
298 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
299 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
304 static __rte_always_inline uint32_t
305 fill_sg_comp_from_buf(sg_comp_t *list,
309 sg_comp_t *to = &list[i>>2];
311 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
312 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
317 static __rte_always_inline uint32_t
318 fill_sg_comp_from_buf_min(sg_comp_t *list,
323 sg_comp_t *to = &list[i >> 2];
324 uint32_t size = *psize;
327 e_len = (size > from->size) ? from->size : size;
328 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
329 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
336 * This fills the SG I/O list expected by the microcode
337 * from the IOV given by the user.
339 static __rte_always_inline uint32_t
340 fill_sg_comp_from_iov(sg_comp_t *list,
342 iov_ptr_t *from, uint32_t from_offset,
343 uint32_t *psize, buf_ptr_t *extra_buf,
344 uint32_t extra_offset)
347 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
348 uint32_t size = *psize;
352 for (j = 0; (j < from->buf_cnt) && size; j++) {
353 phys_addr_t e_dma_addr;
355 sg_comp_t *to = &list[i >> 2];
357 if (unlikely(from_offset)) {
358 if (from_offset >= bufs[j].size) {
359 from_offset -= bufs[j].size;
362 e_dma_addr = bufs[j].dma_addr + from_offset;
363 e_len = (size > (bufs[j].size - from_offset)) ?
364 (bufs[j].size - from_offset) : size;
367 e_dma_addr = bufs[j].dma_addr;
368 e_len = (size > bufs[j].size) ?
372 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
373 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
375 if (extra_len && (e_len >= extra_offset)) {
376 /* Break the data at given offset */
377 uint32_t next_len = e_len - extra_offset;
378 phys_addr_t next_dma = e_dma_addr + extra_offset;
383 e_len = extra_offset;
385 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
388 extra_len = RTE_MIN(extra_len, size);
389 /* Insert extra data ptr */
394 rte_cpu_to_be_16(extra_len);
396 rte_cpu_to_be_64(extra_buf->dma_addr);
400 next_len = RTE_MIN(next_len, size);
401 /* insert the rest of the data */
405 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
406 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
415 extra_offset -= size;
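/*
 * Prepare a plain HASH/HMAC request. The caller-provided meta buffer is
 * carved up in place: completion code and result word first (kept close
 * to the app data), then the cpt_request_info, then the SG lists. The
 * gather list carries the auth key from the context (for HMAC) followed
 * by the input data; the scatter list receives the computed digest,
 * either into mac_buf or back into the source IOV just past the data.
 */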
423 static __rte_always_inline void
424 cpt_digest_gen_prep(uint32_t flags,
426 digest_params_t *params,
430 struct cpt_request_info *req;
432 uint16_t data_len, mac_len, key_len;
433 auth_type_t hash_type;
436 sg_comp_t *gather_comp;
437 sg_comp_t *scatter_comp;
439 uint32_t g_size_bytes, s_size_bytes;
440 uint64_t dptr_dma, rptr_dma;
441 vq_cmd_word0_t vq_cmd_w0;
442 vq_cmd_word3_t vq_cmd_w3;
443 void *c_vaddr, *m_vaddr;
444 uint64_t c_dma, m_dma;
445 opcode_info_t opcode;
447 ctx = params->ctx_buf.vaddr;
448 meta_p = ¶ms->meta_buf;
450 m_vaddr = meta_p->vaddr;
451 m_dma = meta_p->dma_addr;
454 * Save initial space following the app data so that the completion code &
455 * alternate completion code fall in the same cache line as the app data
457 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
458 m_dma += COMPLETION_CODE_SIZE;
459 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
461 c_vaddr = (uint8_t *)m_vaddr + size;
462 c_dma = m_dma + size;
463 size += sizeof(cpt_res_s_t);
465 m_vaddr = (uint8_t *)m_vaddr + size;
470 size = sizeof(struct cpt_request_info);
471 m_vaddr = (uint8_t *)m_vaddr + size;
474 hash_type = ctx->hash_type;
475 mac_len = ctx->mac_len;
476 key_len = ctx->auth_key_len;
477 data_len = AUTH_DLEN(d_lens);
481 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
483 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
484 vq_cmd_w0.s.param1 = key_len;
485 vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
487 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
488 vq_cmd_w0.s.param1 = 0;
489 vq_cmd_w0.s.dlen = data_len;
494 /* Only the NULL-auth, NULL-cipher case enters this branch */
495 if (unlikely(!hash_type && !ctx->enc_cipher)) {
496 opcode.s.major = CPT_MAJOR_OP_MISC;
497 /* Minor op is passthrough */
498 opcode.s.minor = 0x03;
499 /* Send out completion code only */
500 vq_cmd_w0.s.param2 = 0x1;
503 vq_cmd_w0.s.opcode = opcode.flags;
505 /* DPTR has SG list */
509 ((uint16_t *)in_buffer)[0] = 0;
510 ((uint16_t *)in_buffer)[1] = 0;
512 /* TODO Add error check if space will be sufficient */
513 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
522 uint64_t k_dma = params->ctx_buf.dma_addr +
523 offsetof(struct cpt_ctx, auth_key);
525 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
531 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
533 if (unlikely(size)) {
534 CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
540 * Looks like we need to support zero data
541 * gather ptr in case of hash & hmac
545 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
546 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
553 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
555 if (flags & VALID_MAC_BUF) {
556 if (unlikely(params->mac_buf.size < mac_len)) {
557 CPT_LOG_DP_ERR("Insufficient MAC size");
562 i = fill_sg_comp_from_buf_min(scatter_comp, i,
563 ¶ms->mac_buf, &size);
566 i = fill_sg_comp_from_iov(scatter_comp, i,
567 params->src_iov, data_len,
569 if (unlikely(size)) {
570 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
576 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
577 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
579 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
581 /* This is the DPTR len in case of SG mode */
582 vq_cmd_w0.s.dlen = size;
584 m_vaddr = (uint8_t *)m_vaddr + size;
587 /* cpt alternate completion address saved earlier */
588 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
589 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
590 rptr_dma = c_dma - 8;
592 req->ist.ei1 = dptr_dma;
593 req->ist.ei2 = rptr_dma;
598 /* 16 byte aligned cpt res address */
599 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
600 *req->completion_addr = COMPLETION_CODE_INIT;
601 req->comp_baddr = c_dma;
603 /* Fill microcode part of instruction */
604 req->ist.ei0 = vq_cmd_w0.u64;
605 req->ist.ei3 = vq_cmd_w3.u64;
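/*
 * Prepare an encrypt (+ optional auth) request on the flexi-crypto
 * path. The 8-byte offset control word placed in front of the data
 * packs (encr_offset << 16) | (iv_offset << 8) | auth_offset in
 * big-endian and is immediately followed by the per-op IV. With a
 * single flat buffer and enough head/tail room the control word and IV
 * are written directly in front of the data (direct mode); otherwise
 * they are staged in the meta buffer and referenced from the gather
 * list (SG mode).
 */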
613 static __rte_always_inline void
614 cpt_enc_hmac_prep(uint32_t flags,
617 fc_params_t *fc_params,
621 uint32_t iv_offset = 0;
622 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
623 struct cpt_ctx *cpt_ctx;
624 uint32_t cipher_type, hash_type;
625 uint32_t mac_len, size;
627 struct cpt_request_info *req;
628 buf_ptr_t *meta_p, *aad_buf = NULL;
629 uint32_t encr_offset, auth_offset;
630 uint32_t encr_data_len, auth_data_len, aad_len = 0;
631 uint32_t passthrough_len = 0;
632 void *m_vaddr, *offset_vaddr;
633 uint64_t m_dma, offset_dma, ctx_dma;
634 vq_cmd_word0_t vq_cmd_w0;
635 vq_cmd_word3_t vq_cmd_w3;
638 opcode_info_t opcode;
640 meta_p = &fc_params->meta_buf;
641 m_vaddr = meta_p->vaddr;
642 m_dma = meta_p->dma_addr;
644 encr_offset = ENCR_OFFSET(d_offs);
645 auth_offset = AUTH_OFFSET(d_offs);
646 encr_data_len = ENCR_DLEN(d_lens);
647 auth_data_len = AUTH_DLEN(d_lens);
648 if (unlikely(flags & VALID_AAD_BUF)) {
650 * We don't support both AAD
651 * and auth data separately
655 aad_len = fc_params->aad_buf.size;
656 aad_buf = &fc_params->aad_buf;
658 cpt_ctx = fc_params->ctx_buf.vaddr;
659 cipher_type = cpt_ctx->enc_cipher;
660 hash_type = cpt_ctx->hash_type;
661 mac_len = cpt_ctx->mac_len;
664 * Save initial space following the app data so that the completion code &
665 * alternate completion code fall in the same cache line as the app data
667 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
668 m_dma += COMPLETION_CODE_SIZE;
669 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
672 c_vaddr = (uint8_t *)m_vaddr + size;
673 c_dma = m_dma + size;
674 size += sizeof(cpt_res_s_t);
676 m_vaddr = (uint8_t *)m_vaddr + size;
679 /* start cpt request info struct at 8 byte boundary */
680 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
683 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
685 size += sizeof(struct cpt_request_info);
686 m_vaddr = (uint8_t *)m_vaddr + size;
689 if (unlikely(!(flags & VALID_IV_BUF))) {
691 iv_offset = ENCR_IV_OFFSET(d_offs);
694 if (unlikely(flags & VALID_AAD_BUF)) {
696 * When AAD is given, data above encr_offset is passed through.
697 * Since AAD is given as a separate pointer and not as an offset,
698 * this is a special case: we need to fragment the input data
699 * into passthrough + encr_data and then insert AAD in between.
701 if (hash_type != GMAC_TYPE) {
702 passthrough_len = encr_offset;
703 auth_offset = passthrough_len + iv_len;
704 encr_offset = passthrough_len + aad_len + iv_len;
705 auth_data_len = aad_len + encr_data_len;
707 passthrough_len = 16 + aad_len;
708 auth_offset = passthrough_len + iv_len;
709 auth_data_len = aad_len;
712 encr_offset += iv_len;
713 auth_offset += iv_len;
717 opcode.s.major = CPT_MAJOR_OP_FC;
720 if (hash_type == GMAC_TYPE) {
725 auth_dlen = auth_offset + auth_data_len;
726 enc_dlen = encr_data_len + encr_offset;
727 if (unlikely(encr_data_len & 0xf)) {
728 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
729 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
730 else if (likely((cipher_type == AES_CBC) ||
731 (cipher_type == AES_ECB)))
732 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
735 if (unlikely(auth_dlen > enc_dlen)) {
736 inputlen = auth_dlen;
737 outputlen = auth_dlen + mac_len;
740 outputlen = enc_dlen + mac_len;
745 vq_cmd_w0.s.param1 = encr_data_len;
746 vq_cmd_w0.s.param2 = auth_data_len;
748 * On 83XX the IV & offset control word cannot be part of the
749 * instruction and have to be placed in the data buffer, so we
750 * check whether head room is available and only then do the
751 * Direct mode processing
753 if (likely((flags & SINGLE_BUF_INPLACE) &&
754 (flags & SINGLE_BUF_HEADTAILROOM))) {
755 void *dm_vaddr = fc_params->bufs[0].vaddr;
756 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
758 * This flag indicates that there are 24 bytes of head room and
759 * 8 bytes of tail room available, so that we get to do
760 * DIRECT MODE with this limitation
763 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
764 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
767 req->ist.ei1 = offset_dma;
768 /* RPTR should just exclude offset control word */
769 req->ist.ei2 = dm_dma_addr - iv_len;
770 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
771 + outputlen - iv_len);
773 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
775 vq_cmd_w0.s.opcode = opcode.flags;
777 if (likely(iv_len)) {
778 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
780 uint64_t *src = fc_params->iv_buf;
785 *(uint64_t *)offset_vaddr =
786 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
787 ((uint64_t)iv_offset << 8) |
788 ((uint64_t)auth_offset));
791 uint32_t i, g_size_bytes, s_size_bytes;
792 uint64_t dptr_dma, rptr_dma;
793 sg_comp_t *gather_comp;
794 sg_comp_t *scatter_comp;
797 /* This falls under strict SG mode */
798 offset_vaddr = m_vaddr;
800 size = OFF_CTRL_LEN + iv_len;
802 m_vaddr = (uint8_t *)m_vaddr + size;
805 opcode.s.major |= CPT_DMA_MODE;
807 vq_cmd_w0.s.opcode = opcode.flags;
809 if (likely(iv_len)) {
810 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
812 uint64_t *src = fc_params->iv_buf;
817 *(uint64_t *)offset_vaddr =
818 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
819 ((uint64_t)iv_offset << 8) |
820 ((uint64_t)auth_offset));
822 /* DPTR has SG list */
826 ((uint16_t *)in_buffer)[0] = 0;
827 ((uint16_t *)in_buffer)[1] = 0;
829 /* TODO Add error check if space will be sufficient */
830 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
838 /* Offset control word that includes iv */
839 i = fill_sg_comp(gather_comp, i, offset_dma,
840 OFF_CTRL_LEN + iv_len);
843 size = inputlen - iv_len;
845 uint32_t aad_offset = aad_len ? passthrough_len : 0;
847 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
848 i = fill_sg_comp_from_buf_min(gather_comp, i,
852 i = fill_sg_comp_from_iov(gather_comp, i,
855 aad_buf, aad_offset);
858 if (unlikely(size)) {
859 CPT_LOG_DP_ERR("Insufficient buffer space,"
860 " size %d needed", size);
864 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
865 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
868 * Output Scatter list
872 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
875 if (likely(iv_len)) {
876 i = fill_sg_comp(scatter_comp, i,
877 offset_dma + OFF_CTRL_LEN,
881 /* Output data or output data + digest */
882 if (unlikely(flags & VALID_MAC_BUF)) {
883 size = outputlen - iv_len - mac_len;
885 uint32_t aad_offset =
886 aad_len ? passthrough_len : 0;
888 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
889 i = fill_sg_comp_from_buf_min(
895 i = fill_sg_comp_from_iov(scatter_comp,
903 if (unlikely(size)) {
904 CPT_LOG_DP_ERR("Insufficient buffer"
905 " space, size %d needed",
912 i = fill_sg_comp_from_buf(scatter_comp, i,
913 &fc_params->mac_buf);
916 /* Output including mac */
917 size = outputlen - iv_len;
919 uint32_t aad_offset =
920 aad_len ? passthrough_len : 0;
922 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
923 i = fill_sg_comp_from_buf_min(
929 i = fill_sg_comp_from_iov(scatter_comp,
937 if (unlikely(size)) {
938 CPT_LOG_DP_ERR("Insufficient buffer"
939 " space, size %d needed",
945 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
946 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
948 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
950 /* This is the DPTR len in case of SG mode */
951 vq_cmd_w0.s.dlen = size;
953 m_vaddr = (uint8_t *)m_vaddr + size;
956 /* cpt alternate completion address saved earlier */
957 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
958 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
959 rptr_dma = c_dma - 8;
961 req->ist.ei1 = dptr_dma;
962 req->ist.ei2 = rptr_dma;
965 ctx_dma = fc_params->ctx_buf.dma_addr +
966 offsetof(struct cpt_ctx, fctx);
970 vq_cmd_w3.s.cptr = ctx_dma;
972 /* 16 byte aligned cpt res address */
973 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
974 *req->completion_addr = COMPLETION_CODE_INIT;
975 req->comp_baddr = c_dma;
977 /* Fill microcode part of instruction */
978 req->ist.ei0 = vq_cmd_w0.u64;
979 req->ist.ei3 = vq_cmd_w3.u64;
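/*
 * Decrypt counterpart of cpt_enc_hmac_prep(): the request layout is the
 * same, but the MAC to be verified is part of the input, so inputlen
 * includes mac_len while outputlen does not.
 */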
987 static __rte_always_inline void
988 cpt_dec_hmac_prep(uint32_t flags,
991 fc_params_t *fc_params,
995 uint32_t iv_offset = 0, size;
996 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
997 struct cpt_ctx *cpt_ctx;
998 int32_t hash_type, mac_len;
1000 struct cpt_request_info *req;
1001 buf_ptr_t *meta_p, *aad_buf = NULL;
1002 uint32_t encr_offset, auth_offset;
1003 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1004 uint32_t passthrough_len = 0;
1005 void *m_vaddr, *offset_vaddr;
1006 uint64_t m_dma, offset_dma, ctx_dma;
1007 opcode_info_t opcode;
1008 vq_cmd_word0_t vq_cmd_w0;
1009 vq_cmd_word3_t vq_cmd_w3;
1013 meta_p = &fc_params->meta_buf;
1014 m_vaddr = meta_p->vaddr;
1015 m_dma = meta_p->dma_addr;
1017 encr_offset = ENCR_OFFSET(d_offs);
1018 auth_offset = AUTH_OFFSET(d_offs);
1019 encr_data_len = ENCR_DLEN(d_lens);
1020 auth_data_len = AUTH_DLEN(d_lens);
1022 if (unlikely(flags & VALID_AAD_BUF)) {
1024 * We don't support both AAD
1025 * and auth data separately
1029 aad_len = fc_params->aad_buf.size;
1030 aad_buf = &fc_params->aad_buf;
1033 cpt_ctx = fc_params->ctx_buf.vaddr;
1034 hash_type = cpt_ctx->hash_type;
1035 mac_len = cpt_ctx->mac_len;
1037 if (unlikely(!(flags & VALID_IV_BUF))) {
1039 iv_offset = ENCR_IV_OFFSET(d_offs);
1042 if (unlikely(flags & VALID_AAD_BUF)) {
1044 * When AAD is given, data above encr_offset is passed through.
1045 * Since AAD is given as a separate pointer and not as an offset,
1046 * this is a special case: we need to fragment the input data
1047 * into passthrough + encr_data and then insert AAD in between.
1049 if (hash_type != GMAC_TYPE) {
1050 passthrough_len = encr_offset;
1051 auth_offset = passthrough_len + iv_len;
1052 encr_offset = passthrough_len + aad_len + iv_len;
1053 auth_data_len = aad_len + encr_data_len;
1055 passthrough_len = 16 + aad_len;
1056 auth_offset = passthrough_len + iv_len;
1057 auth_data_len = aad_len;
1060 encr_offset += iv_len;
1061 auth_offset += iv_len;
1065 * Save initial space following the app data so that the completion code &
1066 * alternate completion code fall in the same cache line as the app data
1068 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1069 m_dma += COMPLETION_CODE_SIZE;
1070 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1072 c_vaddr = (uint8_t *)m_vaddr + size;
1073 c_dma = m_dma + size;
1074 size += sizeof(cpt_res_s_t);
1076 m_vaddr = (uint8_t *)m_vaddr + size;
1079 /* start cpt request info structure at 8 byte alignment */
1080 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1083 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1085 size += sizeof(struct cpt_request_info);
1086 m_vaddr = (uint8_t *)m_vaddr + size;
1090 opcode.s.major = CPT_MAJOR_OP_FC;
1093 if (hash_type == GMAC_TYPE) {
1098 enc_dlen = encr_offset + encr_data_len;
1099 auth_dlen = auth_offset + auth_data_len;
1101 if (auth_dlen > enc_dlen) {
1102 inputlen = auth_dlen + mac_len;
1103 outputlen = auth_dlen;
1105 inputlen = enc_dlen + mac_len;
1106 outputlen = enc_dlen;
1110 vq_cmd_w0.s.param1 = encr_data_len;
1111 vq_cmd_w0.s.param2 = auth_data_len;
1114 * On 83XX the IV & offset control word cannot be part of the
1115 * instruction and have to be placed in the data buffer, so we
1116 * check whether head room is available and only then do the
1117 * Direct mode processing
1119 if (likely((flags & SINGLE_BUF_INPLACE) &&
1120 (flags & SINGLE_BUF_HEADTAILROOM))) {
1121 void *dm_vaddr = fc_params->bufs[0].vaddr;
1122 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1124 * This flag indicates that there are 24 bytes of head room and
1125 * 8 bytes of tail room available, so that we get to do
1126 * DIRECT MODE with this limitation
1129 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1130 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1131 req->ist.ei1 = offset_dma;
1133 /* RPTR should just exclude offset control word */
1134 req->ist.ei2 = dm_dma_addr - iv_len;
1136 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1137 outputlen - iv_len);
1138 /* since this is decryption,
1139 * don't touch the content of
1140 * alternate ccode space as it contains
1144 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1146 vq_cmd_w0.s.opcode = opcode.flags;
1148 if (likely(iv_len)) {
1149 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1151 uint64_t *src = fc_params->iv_buf;
1156 *(uint64_t *)offset_vaddr =
1157 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1158 ((uint64_t)iv_offset << 8) |
1159 ((uint64_t)auth_offset));
1162 uint64_t dptr_dma, rptr_dma;
1163 uint32_t g_size_bytes, s_size_bytes;
1164 sg_comp_t *gather_comp;
1165 sg_comp_t *scatter_comp;
1169 /* This falls under strict SG mode */
1170 offset_vaddr = m_vaddr;
1172 size = OFF_CTRL_LEN + iv_len;
1174 m_vaddr = (uint8_t *)m_vaddr + size;
1177 opcode.s.major |= CPT_DMA_MODE;
1179 vq_cmd_w0.s.opcode = opcode.flags;
1181 if (likely(iv_len)) {
1182 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1184 uint64_t *src = fc_params->iv_buf;
1189 *(uint64_t *)offset_vaddr =
1190 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1191 ((uint64_t)iv_offset << 8) |
1192 ((uint64_t)auth_offset));
1194 /* DPTR has SG list */
1195 in_buffer = m_vaddr;
1198 ((uint16_t *)in_buffer)[0] = 0;
1199 ((uint16_t *)in_buffer)[1] = 0;
1201 /* TODO Add error check if space will be sufficient */
1202 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1209 /* Offset control word that includes iv */
1210 i = fill_sg_comp(gather_comp, i, offset_dma,
1211 OFF_CTRL_LEN + iv_len);
1213 /* Add input data */
1214 if (flags & VALID_MAC_BUF) {
1215 size = inputlen - iv_len - mac_len;
1217 /* input data only */
1218 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1219 i = fill_sg_comp_from_buf_min(
1224 uint32_t aad_offset = aad_len ?
1225 passthrough_len : 0;
1227 i = fill_sg_comp_from_iov(gather_comp,
1234 if (unlikely(size)) {
1235 CPT_LOG_DP_ERR("Insufficient buffer"
1236 " space, size %d needed",
1244 i = fill_sg_comp_from_buf(gather_comp, i,
1245 &fc_params->mac_buf);
1248 /* input data + mac */
1249 size = inputlen - iv_len;
1251 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1252 i = fill_sg_comp_from_buf_min(
1257 uint32_t aad_offset = aad_len ?
1258 passthrough_len : 0;
1260 if (unlikely(!fc_params->src_iov)) {
1261 CPT_LOG_DP_ERR("Bad input args");
1265 i = fill_sg_comp_from_iov(
1273 if (unlikely(size)) {
1274 CPT_LOG_DP_ERR("Insufficient buffer"
1275 " space, size %d needed",
1281 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1282 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1285 * Output Scatter List
1290 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1294 i = fill_sg_comp(scatter_comp, i,
1295 offset_dma + OFF_CTRL_LEN,
1299 /* Add output data */
1300 size = outputlen - iv_len;
1302 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1303 /* handle single buffer here */
1304 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1308 uint32_t aad_offset = aad_len ?
1309 passthrough_len : 0;
1311 if (unlikely(!fc_params->dst_iov)) {
1312 CPT_LOG_DP_ERR("Bad input args");
1316 i = fill_sg_comp_from_iov(scatter_comp, i,
1317 fc_params->dst_iov, 0,
1322 if (unlikely(size)) {
1323 CPT_LOG_DP_ERR("Insufficient buffer space,"
1324 " size %d needed", size);
1329 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1330 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1332 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1334 /* This is the DPTR len in case of SG mode */
1335 vq_cmd_w0.s.dlen = size;
1337 m_vaddr = (uint8_t *)m_vaddr + size;
1340 /* cpt alternate completion address saved earlier */
1341 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1342 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1343 rptr_dma = c_dma - 8;
1344 size += COMPLETION_CODE_SIZE;
1346 req->ist.ei1 = dptr_dma;
1347 req->ist.ei2 = rptr_dma;
1350 ctx_dma = fc_params->ctx_buf.dma_addr +
1351 offsetof(struct cpt_ctx, fctx);
1354 vq_cmd_w3.s.grp = 0;
1355 vq_cmd_w3.s.cptr = ctx_dma;
1357 /* 16 byte aligned cpt res address */
1358 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1359 *req->completion_addr = COMPLETION_CODE_INIT;
1360 req->comp_baddr = c_dma;
1362 /* Fill microcode part of instruction */
1363 req->ist.ei0 = vq_cmd_w0.u64;
1364 req->ist.ei3 = vq_cmd_w3.u64;
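/*
 * Prepare a ZUC/SNOW 3G request. Offsets in d_offs arrive in bits and
 * are converted to bytes here, while the data lengths stay in bits in
 * the GP op header (param1/param2). zsk_flags from the context (set to
 * 1 by cpt_fc_auth_set_key() for the integrity algorithms) selects the
 * auth-only path; otherwise the cipher path is taken. The minor opcode
 * encodes the SNOW 3G vs ZUC selection along with the key/IV mode and
 * the operation type.
 */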
1372 static __rte_always_inline void
1373 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1376 fc_params_t *params,
1381 int32_t inputlen, outputlen;
1382 struct cpt_ctx *cpt_ctx;
1383 uint32_t mac_len = 0;
1385 struct cpt_request_info *req;
1387 uint32_t encr_offset = 0, auth_offset = 0;
1388 uint32_t encr_data_len = 0, auth_data_len = 0;
1389 int flags, iv_len = 16;
1390 void *m_vaddr, *c_vaddr;
1391 uint64_t m_dma, c_dma, offset_ctrl;
1392 uint64_t *offset_vaddr, offset_dma;
1393 uint32_t *iv_s, iv[4];
1394 vq_cmd_word0_t vq_cmd_w0;
1395 vq_cmd_word3_t vq_cmd_w3;
1396 opcode_info_t opcode;
1398 buf_p = ¶ms->meta_buf;
1399 m_vaddr = buf_p->vaddr;
1400 m_dma = buf_p->dma_addr;
1402 cpt_ctx = params->ctx_buf.vaddr;
1403 flags = cpt_ctx->zsk_flags;
1404 mac_len = cpt_ctx->mac_len;
1405 snow3g = cpt_ctx->snow3g;
1408 * Save initial space following the app data so that the completion code &
1409 * alternate completion code fall in the same cache line as the app data
1411 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1412 m_dma += COMPLETION_CODE_SIZE;
1413 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1416 c_vaddr = (uint8_t *)m_vaddr + size;
1417 c_dma = m_dma + size;
1418 size += sizeof(cpt_res_s_t);
1420 m_vaddr = (uint8_t *)m_vaddr + size;
1423 /* Reserve memory for cpt request info */
1426 size = sizeof(struct cpt_request_info);
1427 m_vaddr = (uint8_t *)m_vaddr + size;
1430 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1432 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1434 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1435 (0 << 3) | (flags & 0x7));
1439 * Microcode expects offsets in bytes
1440 * TODO: Rounding off
1442 auth_data_len = AUTH_DLEN(d_lens);
1445 auth_offset = AUTH_OFFSET(d_offs);
1446 auth_offset = auth_offset / 8;
1448 /* consider iv len */
1449 auth_offset += iv_len;
1451 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1452 outputlen = mac_len;
1454 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1459 * Microcode expects offsets in bytes
1460 * TODO: Rounding off
1462 encr_data_len = ENCR_DLEN(d_lens);
1464 encr_offset = ENCR_OFFSET(d_offs);
1465 encr_offset = encr_offset / 8;
1466 /* consider iv len */
1467 encr_offset += iv_len;
1469 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1470 outputlen = inputlen;
1472 /* iv offset is 0 */
1473 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1477 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1482 * DPDK seems to provide the IV as IV3 IV2 IV1 IV0 in
1483 * big-endian form, while the MC needs it as IV0 IV1 IV2 IV3
1486 for (j = 0; j < 4; j++)
1487 iv[j] = iv_s[3 - j];
1489 /* ZUC doesn't need a swap */
1490 for (j = 0; j < 4; j++)
1495 * GP op header, lengths are expected in bits.
1498 vq_cmd_w0.s.param1 = encr_data_len;
1499 vq_cmd_w0.s.param2 = auth_data_len;
1502 * On 83XX the IV & offset control word cannot be part of the
1503 * instruction and have to be placed in the data buffer, so we
1504 * check whether head room is available and only then do the
1505 * Direct mode processing
1507 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1508 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1509 void *dm_vaddr = params->bufs[0].vaddr;
1510 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1512 * This flag indicates that there are 24 bytes of head room and
1513 * 8 bytes of tail room available, so that we get to do
1514 * DIRECT MODE with this limitation
1517 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1518 OFF_CTRL_LEN - iv_len);
1519 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1522 req->ist.ei1 = offset_dma;
1523 /* RPTR should just exclude offset control word */
1524 req->ist.ei2 = dm_dma_addr - iv_len;
1525 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1526 + outputlen - iv_len);
1528 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1530 vq_cmd_w0.s.opcode = opcode.flags;
1532 if (likely(iv_len)) {
1533 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1535 memcpy(iv_d, iv, 16);
1538 *offset_vaddr = offset_ctrl;
1540 uint32_t i, g_size_bytes, s_size_bytes;
1541 uint64_t dptr_dma, rptr_dma;
1542 sg_comp_t *gather_comp;
1543 sg_comp_t *scatter_comp;
1547 /* Save space for the offset control word and iv */
1548 offset_vaddr = m_vaddr;
1551 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1552 m_dma += OFF_CTRL_LEN + iv_len;
1554 opcode.s.major |= CPT_DMA_MODE;
1556 vq_cmd_w0.s.opcode = opcode.flags;
1558 /* DPTR has SG list */
1559 in_buffer = m_vaddr;
1562 ((uint16_t *)in_buffer)[0] = 0;
1563 ((uint16_t *)in_buffer)[1] = 0;
1565 /* TODO Add error check if space will be sufficient */
1566 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1573 /* Offset control word followed by iv */
1575 i = fill_sg_comp(gather_comp, i, offset_dma,
1576 OFF_CTRL_LEN + iv_len);
1578 /* iv offset is 0 */
1579 *offset_vaddr = offset_ctrl;
1581 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1582 memcpy(iv_d, iv, 16);
1585 size = inputlen - iv_len;
1587 i = fill_sg_comp_from_iov(gather_comp, i,
1590 if (unlikely(size)) {
1591 CPT_LOG_DP_ERR("Insufficient buffer space,"
1592 " size %d needed", size);
1596 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1597 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1600 * Output Scatter List
1605 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1608 /* IV in SLIST only for EEA3 & UEA2 */
1613 i = fill_sg_comp(scatter_comp, i,
1614 offset_dma + OFF_CTRL_LEN, iv_len);
1617 /* Add output data */
1618 if (req_flags & VALID_MAC_BUF) {
1619 size = outputlen - iv_len - mac_len;
1621 i = fill_sg_comp_from_iov(scatter_comp, i,
1625 if (unlikely(size)) {
1626 CPT_LOG_DP_ERR("Insufficient buffer space,"
1627 " size %d needed", size);
1634 i = fill_sg_comp_from_buf(scatter_comp, i,
1638 /* Output including mac */
1639 size = outputlen - iv_len;
1641 i = fill_sg_comp_from_iov(scatter_comp, i,
1645 if (unlikely(size)) {
1646 CPT_LOG_DP_ERR("Insufficient buffer space,"
1647 " size %d needed", size);
1652 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1653 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1655 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1657 /* This is the DPTR len in case of SG mode */
1658 vq_cmd_w0.s.dlen = size;
1660 m_vaddr = (uint8_t *)m_vaddr + size;
1663 /* cpt alternate completion address saved earlier */
1664 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1665 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1666 rptr_dma = c_dma - 8;
1668 req->ist.ei1 = dptr_dma;
1669 req->ist.ei2 = rptr_dma;
1674 vq_cmd_w3.s.grp = 0;
1675 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1676 offsetof(struct cpt_ctx, zs_ctx);
1678 /* 16 byte aligned cpt res address */
1679 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1680 *req->completion_addr = COMPLETION_CODE_INIT;
1681 req->comp_baddr = c_dma;
1683 /* Fill microcode part of instruction */
1684 req->ist.ei0 = vq_cmd_w0.u64;
1685 req->ist.ei3 = vq_cmd_w3.u64;
1693 static __rte_always_inline void
1694 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1697 fc_params_t *params,
1702 int32_t inputlen = 0, outputlen;
1703 struct cpt_ctx *cpt_ctx;
1704 uint8_t snow3g, iv_len = 16;
1705 struct cpt_request_info *req;
1707 uint32_t encr_offset;
1708 uint32_t encr_data_len;
1710 void *m_vaddr, *c_vaddr;
1711 uint64_t m_dma, c_dma;
1712 uint64_t *offset_vaddr, offset_dma;
1713 uint32_t *iv_s, iv[4], j;
1714 vq_cmd_word0_t vq_cmd_w0;
1715 vq_cmd_word3_t vq_cmd_w3;
1716 opcode_info_t opcode;
1718 buf_p = ¶ms->meta_buf;
1719 m_vaddr = buf_p->vaddr;
1720 m_dma = buf_p->dma_addr;
1723 * Microcode expects offsets in bytes
1724 * TODO: Rounding off
1726 encr_offset = ENCR_OFFSET(d_offs) / 8;
1727 encr_data_len = ENCR_DLEN(d_lens);
1729 cpt_ctx = params->ctx_buf.vaddr;
1730 flags = cpt_ctx->zsk_flags;
1731 snow3g = cpt_ctx->snow3g;
1733 * Save initial space following the app data so that the completion code &
1734 * alternate completion code fall in the same cache line as the app data
1736 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1737 m_dma += COMPLETION_CODE_SIZE;
1738 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1741 c_vaddr = (uint8_t *)m_vaddr + size;
1742 c_dma = m_dma + size;
1743 size += sizeof(cpt_res_s_t);
1745 m_vaddr = (uint8_t *)m_vaddr + size;
1748 /* Reserve memory for cpt request info */
1751 size = sizeof(struct cpt_request_info);
1752 m_vaddr = (uint8_t *)m_vaddr + size;
1755 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1757 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1759 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1760 (0 << 3) | (flags & 0x7));
1762 /* consider iv len */
1763 encr_offset += iv_len;
1765 inputlen = encr_offset +
1766 (RTE_ALIGN(encr_data_len, 8) / 8);
1767 outputlen = inputlen;
1770 iv_s = params->iv_buf;
1773 * DPDK seems to provide the IV as IV3 IV2 IV1 IV0 in
1774 * big-endian form, while the MC needs it as IV0 IV1 IV2 IV3
1777 for (j = 0; j < 4; j++)
1778 iv[j] = iv_s[3 - j];
1780 /* ZUC doesn't need a swap */
1781 for (j = 0; j < 4; j++)
1786 * GP op header, lengths are expected in bits.
1789 vq_cmd_w0.s.param1 = encr_data_len;
1792 * On 83XX the IV & offset control word cannot be part of the
1793 * instruction and have to be placed in the data buffer, so we
1794 * check whether head room is available and only then do the
1795 * Direct mode processing
1797 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1798 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1799 void *dm_vaddr = params->bufs[0].vaddr;
1800 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1802 * This flag indicates that there are 24 bytes of head room and
1803 * 8 bytes of tail room available, so that we get to do
1804 * DIRECT MODE with this limitation
1807 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1808 OFF_CTRL_LEN - iv_len);
1809 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1812 req->ist.ei1 = offset_dma;
1813 /* RPTR should just exclude offset control word */
1814 req->ist.ei2 = dm_dma_addr - iv_len;
1815 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1816 + outputlen - iv_len);
1818 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1820 vq_cmd_w0.s.opcode = opcode.flags;
1822 if (likely(iv_len)) {
1823 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1825 memcpy(iv_d, iv, 16);
1828 /* iv offset is 0 */
1829 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1831 uint32_t i, g_size_bytes, s_size_bytes;
1832 uint64_t dptr_dma, rptr_dma;
1833 sg_comp_t *gather_comp;
1834 sg_comp_t *scatter_comp;
1838 /* save space for offset and iv... */
1839 offset_vaddr = m_vaddr;
1842 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1843 m_dma += OFF_CTRL_LEN + iv_len;
1845 opcode.s.major |= CPT_DMA_MODE;
1847 vq_cmd_w0.s.opcode = opcode.flags;
1849 /* DPTR has SG list */
1850 in_buffer = m_vaddr;
1853 ((uint16_t *)in_buffer)[0] = 0;
1854 ((uint16_t *)in_buffer)[1] = 0;
1856 /* TODO Add error check if space will be sufficient */
1857 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1864 /* Offset control word */
1866 /* iv offset is 0 */
1867 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1869 i = fill_sg_comp(gather_comp, i, offset_dma,
1870 OFF_CTRL_LEN + iv_len);
1872 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1873 memcpy(iv_d, iv, 16);
1875 /* Add input data */
1876 size = inputlen - iv_len;
1878 i = fill_sg_comp_from_iov(gather_comp, i,
1881 if (unlikely(size)) {
1882 CPT_LOG_DP_ERR("Insufficient buffer space,"
1883 " size %d needed", size);
1887 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1888 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1891 * Output Scatter List
1896 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1899 i = fill_sg_comp(scatter_comp, i,
1900 offset_dma + OFF_CTRL_LEN,
1903 /* Add output data */
1904 size = outputlen - iv_len;
1906 i = fill_sg_comp_from_iov(scatter_comp, i,
1910 if (unlikely(size)) {
1911 CPT_LOG_DP_ERR("Insufficient buffer space,"
1912 " size %d needed", size);
1916 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1917 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1919 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1921 /* This is the DPTR len in case of SG mode */
1922 vq_cmd_w0.s.dlen = size;
1924 m_vaddr = (uint8_t *)m_vaddr + size;
1927 /* cpt alternate completion address saved earlier */
1928 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1929 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1930 rptr_dma = c_dma - 8;
1932 req->ist.ei1 = dptr_dma;
1933 req->ist.ei2 = rptr_dma;
1938 vq_cmd_w3.s.grp = 0;
1939 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1940 offsetof(struct cpt_ctx, zs_ctx);
1942 /* 16 byte aligned cpt res address */
1943 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1944 *req->completion_addr = COMPLETION_CODE_INIT;
1945 req->comp_baddr = c_dma;
1947 /* Fill microcode part of instruction */
1948 req->ist.ei0 = vq_cmd_w0.u64;
1949 req->ist.ei3 = vq_cmd_w3.u64;
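/*
 * Prepare a KASUMI F8/F9 request. The direction bit is taken from the
 * ninth byte of the supplied IV (iv_s[8] & 0x1), and the minor opcode
 * carries the ECB/CBC selection, the direction and zsk_flags. This path
 * always uses a gather/scatter list (CPT_DMA_MODE is set in the major
 * opcode).
 */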
1957 static __rte_always_inline void
1958 cpt_kasumi_enc_prep(uint32_t req_flags,
1961 fc_params_t *params,
1966 int32_t inputlen = 0, outputlen = 0;
1967 struct cpt_ctx *cpt_ctx;
1968 uint32_t mac_len = 0;
1970 struct cpt_request_info *req;
1972 uint32_t encr_offset, auth_offset;
1973 uint32_t encr_data_len, auth_data_len;
1975 uint8_t *iv_s, *iv_d, iv_len = 8;
1977 void *m_vaddr, *c_vaddr;
1978 uint64_t m_dma, c_dma;
1979 uint64_t *offset_vaddr, offset_dma;
1980 vq_cmd_word0_t vq_cmd_w0;
1981 vq_cmd_word3_t vq_cmd_w3;
1982 opcode_info_t opcode;
1984 uint32_t g_size_bytes, s_size_bytes;
1985 uint64_t dptr_dma, rptr_dma;
1986 sg_comp_t *gather_comp;
1987 sg_comp_t *scatter_comp;
1989 buf_p = ¶ms->meta_buf;
1990 m_vaddr = buf_p->vaddr;
1991 m_dma = buf_p->dma_addr;
1993 encr_offset = ENCR_OFFSET(d_offs) / 8;
1994 auth_offset = AUTH_OFFSET(d_offs) / 8;
1995 encr_data_len = ENCR_DLEN(d_lens);
1996 auth_data_len = AUTH_DLEN(d_lens);
1998 cpt_ctx = params->ctx_buf.vaddr;
1999 flags = cpt_ctx->zsk_flags;
2000 mac_len = cpt_ctx->mac_len;
2003 iv_s = params->iv_buf;
2005 iv_s = params->auth_iv_buf;
2007 dir = iv_s[8] & 0x1;
2010 * Save initial space following the app data so that the completion code &
2011 * alternate completion code fall in the same cache line as the app data
2013 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2014 m_dma += COMPLETION_CODE_SIZE;
2015 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2018 c_vaddr = (uint8_t *)m_vaddr + size;
2019 c_dma = m_dma + size;
2020 size += sizeof(cpt_res_s_t);
2022 m_vaddr = (uint8_t *)m_vaddr + size;
2025 /* Reserve memory for cpt request info */
2028 size = sizeof(struct cpt_request_info);
2029 m_vaddr = (uint8_t *)m_vaddr + size;
2032 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2034 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2035 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2036 (dir << 4) | (0 << 3) | (flags & 0x7));
2039 * GP op header, lengths are expected in bits.
2042 vq_cmd_w0.s.param1 = encr_data_len;
2043 vq_cmd_w0.s.param2 = auth_data_len;
2044 vq_cmd_w0.s.opcode = opcode.flags;
2046 /* consider iv len */
2048 encr_offset += iv_len;
2049 auth_offset += iv_len;
2052 /* save space for offset ctrl and iv */
2053 offset_vaddr = m_vaddr;
2056 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2057 m_dma += OFF_CTRL_LEN + iv_len;
2059 /* DPTR has SG list */
2060 in_buffer = m_vaddr;
2063 ((uint16_t *)in_buffer)[0] = 0;
2064 ((uint16_t *)in_buffer)[1] = 0;
2066 /* TODO Add error check if space will be sufficient */
2067 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2074 /* Offset control word followed by iv */
2077 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2078 outputlen = inputlen;
2079 /* iv offset is 0 */
2080 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2082 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2083 outputlen = mac_len;
2084 /* iv offset is 0 */
2085 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2088 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2091 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2092 memcpy(iv_d, iv_s, iv_len);
2095 size = inputlen - iv_len;
2097 i = fill_sg_comp_from_iov(gather_comp, i,
2101 if (unlikely(size)) {
2102 CPT_LOG_DP_ERR("Insufficient buffer space,"
2103 " size %d needed", size);
2107 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2108 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2111 * Output Scatter List
2115 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2118 /* IV in SLIST only for F8 */
2124 i = fill_sg_comp(scatter_comp, i,
2125 offset_dma + OFF_CTRL_LEN,
2129 /* Add output data */
2130 if (req_flags & VALID_MAC_BUF) {
2131 size = outputlen - iv_len - mac_len;
2133 i = fill_sg_comp_from_iov(scatter_comp, i,
2137 if (unlikely(size)) {
2138 CPT_LOG_DP_ERR("Insufficient buffer space,"
2139 " size %d needed", size);
2146 i = fill_sg_comp_from_buf(scatter_comp, i,
2150 /* Output including mac */
2151 size = outputlen - iv_len;
2153 i = fill_sg_comp_from_iov(scatter_comp, i,
2157 if (unlikely(size)) {
2158 CPT_LOG_DP_ERR("Insufficient buffer space,"
2159 " size %d needed", size);
2164 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2165 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2167 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2169 /* This is the DPTR len in case of SG mode */
2170 vq_cmd_w0.s.dlen = size;
2172 m_vaddr = (uint8_t *)m_vaddr + size;
2175 /* cpt alternate completion address saved earlier */
2176 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2177 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2178 rptr_dma = c_dma - 8;
2180 req->ist.ei1 = dptr_dma;
2181 req->ist.ei2 = rptr_dma;
2185 vq_cmd_w3.s.grp = 0;
2186 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2187 offsetof(struct cpt_ctx, k_ctx);
2189 /* 16 byte aligned cpt res address */
2190 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2191 *req->completion_addr = COMPLETION_CODE_INIT;
2192 req->comp_baddr = c_dma;
2194 /* Fill microcode part of instruction */
2195 req->ist.ei0 = vq_cmd_w0.u64;
2196 req->ist.ei3 = vq_cmd_w3.u64;
2204 static __rte_always_inline void
2205 cpt_kasumi_dec_prep(uint64_t d_offs,
2207 fc_params_t *params,
2212 int32_t inputlen = 0, outputlen;
2213 struct cpt_ctx *cpt_ctx;
2214 uint8_t i = 0, iv_len = 8;
2215 struct cpt_request_info *req;
2217 uint32_t encr_offset;
2218 uint32_t encr_data_len;
2221 void *m_vaddr, *c_vaddr;
2222 uint64_t m_dma, c_dma;
2223 uint64_t *offset_vaddr, offset_dma;
2224 vq_cmd_word0_t vq_cmd_w0;
2225 vq_cmd_word3_t vq_cmd_w3;
2226 opcode_info_t opcode;
2228 uint32_t g_size_bytes, s_size_bytes;
2229 uint64_t dptr_dma, rptr_dma;
2230 sg_comp_t *gather_comp;
2231 sg_comp_t *scatter_comp;
2233 buf_p = ¶ms->meta_buf;
2234 m_vaddr = buf_p->vaddr;
2235 m_dma = buf_p->dma_addr;
2237 encr_offset = ENCR_OFFSET(d_offs) / 8;
2238 encr_data_len = ENCR_DLEN(d_lens);
2240 cpt_ctx = params->ctx_buf.vaddr;
2241 flags = cpt_ctx->zsk_flags;
2243 * Save initial space following the app data so that the completion code &
2244 * alternate completion code fall in the same cache line as the app data
2246 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2247 m_dma += COMPLETION_CODE_SIZE;
2248 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2251 c_vaddr = (uint8_t *)m_vaddr + size;
2252 c_dma = m_dma + size;
2253 size += sizeof(cpt_res_s_t);
2255 m_vaddr = (uint8_t *)m_vaddr + size;
2258 /* Reserve memory for cpt request info */
2261 size = sizeof(struct cpt_request_info);
2262 m_vaddr = (uint8_t *)m_vaddr + size;
2265 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2267 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2268 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2269 (dir << 4) | (0 << 3) | (flags & 0x7));
2272 * GP op header, lengths are expected in bits.
2275 vq_cmd_w0.s.param1 = encr_data_len;
2276 vq_cmd_w0.s.opcode = opcode.flags;
2278 /* consider iv len */
2279 encr_offset += iv_len;
2281 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2282 outputlen = inputlen;
2284 /* save space for offset ctrl & iv */
2285 offset_vaddr = m_vaddr;
2288 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2289 m_dma += OFF_CTRL_LEN + iv_len;
2291 /* DPTR has SG list */
2292 in_buffer = m_vaddr;
2295 ((uint16_t *)in_buffer)[0] = 0;
2296 ((uint16_t *)in_buffer)[1] = 0;
2298 /* TODO Add error check if space will be sufficient */
2299 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2306 /* Offset control word followed by iv */
2307 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2309 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2312 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2313 params->iv_buf, iv_len);
2315 /* Add input data */
2316 size = inputlen - iv_len;
2318 i = fill_sg_comp_from_iov(gather_comp, i,
2321 if (unlikely(size)) {
2322 CPT_LOG_DP_ERR("Insufficient buffer space,"
2323 " size %d needed", size);
2327 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2328 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2331 * Output Scatter List
2335 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2338 i = fill_sg_comp(scatter_comp, i,
2339 offset_dma + OFF_CTRL_LEN,
2342 /* Add output data */
2343 size = outputlen - iv_len;
2345 i = fill_sg_comp_from_iov(scatter_comp, i,
2348 if (unlikely(size)) {
2349 CPT_LOG_DP_ERR("Insufficient buffer space,"
2350 " size %d needed", size);
2354 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2355 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2357 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2359 /* This is the DPTR len in case of SG mode */
2360 vq_cmd_w0.s.dlen = size;
2362 m_vaddr = (uint8_t *)m_vaddr + size;
2365 /* cpt alternate completion address saved earlier */
2366 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2367 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2368 rptr_dma = c_dma - 8;
2370 req->ist.ei1 = dptr_dma;
2371 req->ist.ei2 = rptr_dma;
2375 vq_cmd_w3.s.grp = 0;
2376 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2377 offsetof(struct cpt_ctx, k_ctx);
2379 /* 16 byte aligned cpt res address */
2380 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2381 *req->completion_addr = COMPLETION_CODE_INIT;
2382 req->comp_baddr = c_dma;
2384 /* Fill microcode part of instruction */
2385 req->ist.ei0 = vq_cmd_w0.u64;
2386 req->ist.ei3 = vq_cmd_w3.u64;
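/*
 * Dispatchers: cpt_fc_dec_hmac_prep()/cpt_fc_enc_hmac_prep() pick the
 * prep routine from the fc_type recorded in the context (FC_GEN,
 * ZUC_SNOW3G, KASUMI, and HASH_HMAC for digest generation) and hand
 * back the prepared request, which stays NULL when the combination is
 * not supported.
 */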
2394 static __rte_always_inline void *
2395 cpt_fc_dec_hmac_prep(uint32_t flags,
2398 fc_params_t *fc_params,
2401 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2403 void *prep_req = NULL;
2405 fc_type = ctx->fc_type;
2407 if (likely(fc_type == FC_GEN)) {
2408 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2410 } else if (fc_type == ZUC_SNOW3G) {
2411 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2413 } else if (fc_type == KASUMI) {
2414 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2418 * For the AUTH_ONLY case,
2419 * the MC only supports digest generation; verification
2420 * should be done in software via memcmp()
2426 static __rte_always_inline void *__rte_hot
2427 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2428 fc_params_t *fc_params, void *op)
2430 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2432 void *prep_req = NULL;
2434 fc_type = ctx->fc_type;
2436 /* Common API for the rest of the ops */
2437 if (likely(fc_type == FC_GEN)) {
2438 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2440 } else if (fc_type == ZUC_SNOW3G) {
2441 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2443 } else if (fc_type == KASUMI) {
2444 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2446 } else if (fc_type == HASH_HMAC) {
2447 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
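/*
 * Program the authentication side of the context. Types from ZUC_EIA3
 * up to KASUMI_F9_ECB switch the context to the matching fc_type with a
 * fixed 4-byte MAC and zsk_flags set to 1; the remaining hash types go
 * through the flexi-crypto context, where a supplied key is kept in
 * auth_key and mirrored into the HMAC opad field with auth_input_type
 * set.
 */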
2453 static __rte_always_inline int
2454 cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
2455 uint16_t key_len, uint16_t mac_len)
2457 struct cpt_ctx *cpt_ctx = ctx;
2458 mc_fc_context_t *fctx = &cpt_ctx->fctx;
2460 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2465 /* No support for AEAD yet */
2466 if (cpt_ctx->enc_cipher)
2468 /* For ZUC/SNOW3G/Kasumi */
2471 cpt_ctx->snow3g = 1;
2472 gen_key_snow3g(key, keyx);
2473 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2474 cpt_ctx->fc_type = ZUC_SNOW3G;
2475 cpt_ctx->zsk_flags = 0x1;
2478 cpt_ctx->snow3g = 0;
2479 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2480 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2481 cpt_ctx->fc_type = ZUC_SNOW3G;
2482 cpt_ctx->zsk_flags = 0x1;
2485 /* Kasumi ECB mode */
2487 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2488 cpt_ctx->fc_type = KASUMI;
2489 cpt_ctx->zsk_flags = 0x1;
2492 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2493 cpt_ctx->fc_type = KASUMI;
2494 cpt_ctx->zsk_flags = 0x1;
2499 cpt_ctx->mac_len = 4;
2500 cpt_ctx->hash_type = type;
2504 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2505 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2506 cpt_ctx->fc_type = HASH_HMAC;
2509 if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
2512 /* For GMAC auth, cipher must be NULL */
2513 if (type == GMAC_TYPE)
2514 fctx->enc.enc_cipher = 0;
2516 fctx->enc.hash_type = cpt_ctx->hash_type = type;
2517 fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
2521 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2522 memcpy(cpt_ctx->auth_key, key, key_len);
2523 cpt_ctx->auth_key_len = key_len;
2524 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2525 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2528 memcpy(fctx->hmac.opad, key, key_len);
2529 fctx->enc.auth_input_type = 1;
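/*
 * fill_sess_* helpers translate the rte_crypto_sym_xform into the CPT
 * session (cpt_sess_misc) and the per-session cpt_ctx kept in the
 * session private area: they pick the CPT cipher/auth types, record
 * IV/digest/AAD parameters, and then program the context through
 * cpt_fc_ciph_set_key()/cpt_fc_auth_set_key().
 */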
2534 static __rte_always_inline int
2535 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2536 struct cpt_sess_misc *sess)
2538 struct rte_crypto_aead_xform *aead_form;
2539 cipher_type_t enc_type = 0; /* NULL Cipher type */
2540 auth_type_t auth_type = 0; /* NULL Auth type */
2541 uint32_t cipher_key_len = 0;
2542 uint8_t aes_gcm = 0;
2543 aead_form = &xform->aead;
2544 void *ctx = SESS_PRIV(sess);
2546 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
2547 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2548 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2549 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
2550 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2551 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2553 CPT_LOG_DP_ERR("Unknown aead operation");
2556 switch (aead_form->algo) {
2557 case RTE_CRYPTO_AEAD_AES_GCM:
2559 cipher_key_len = 16;
2562 case RTE_CRYPTO_AEAD_AES_CCM:
2563 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2566 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
2567 enc_type = CHACHA20;
2568 auth_type = POLY1305;
2569 cipher_key_len = 32;
2570 sess->chacha_poly = 1;
2573 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2577 if (aead_form->key.length < cipher_key_len) {
2578 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2579 (unsigned long)aead_form->key.length);
2583 sess->aes_gcm = aes_gcm;
2584 sess->mac_len = aead_form->digest_length;
2585 sess->iv_offset = aead_form->iv.offset;
2586 sess->iv_length = aead_form->iv.length;
2587 sess->aad_length = aead_form->aad_length;
2589 if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2590 aead_form->key.length, NULL)))
2593 if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
2594 aead_form->digest_length)))
2600 static __rte_always_inline int
2601 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2602 struct cpt_sess_misc *sess)
2604 struct rte_crypto_cipher_xform *c_form;
2605 cipher_type_t enc_type = 0; /* NULL Cipher type */
2606 uint32_t cipher_key_len = 0;
2607 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
2609 c_form = &xform->cipher;
2611 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2612 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2613 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2614 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2616 CPT_LOG_DP_ERR("Unknown cipher operation");
2620 switch (c_form->algo) {
2621 case RTE_CRYPTO_CIPHER_AES_CBC:
2623 cipher_key_len = 16;
2625 case RTE_CRYPTO_CIPHER_3DES_CBC:
2626 enc_type = DES3_CBC;
2627 cipher_key_len = 24;
2629 case RTE_CRYPTO_CIPHER_DES_CBC:
2630 /* DES is implemented using 3DES in hardware */
2631 enc_type = DES3_CBC;
2634 case RTE_CRYPTO_CIPHER_AES_CTR:
2636 cipher_key_len = 16;
2639 case RTE_CRYPTO_CIPHER_NULL:
2643 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2644 enc_type = KASUMI_F8_ECB;
2645 cipher_key_len = 16;
2648 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2649 enc_type = SNOW3G_UEA2;
2650 cipher_key_len = 16;
2653 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2654 enc_type = ZUC_EEA3;
2655 cipher_key_len = 16;
2658 case RTE_CRYPTO_CIPHER_AES_XTS:
2660 cipher_key_len = 16;
2662 case RTE_CRYPTO_CIPHER_3DES_ECB:
2663 enc_type = DES3_ECB;
2664 cipher_key_len = 24;
2666 case RTE_CRYPTO_CIPHER_AES_ECB:
2668 cipher_key_len = 16;
2670 case RTE_CRYPTO_CIPHER_3DES_CTR:
2671 case RTE_CRYPTO_CIPHER_AES_F8:
2672 case RTE_CRYPTO_CIPHER_ARC4:
2673 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2677 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2682 if (c_form->key.length < cipher_key_len) {
2683 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2684 (unsigned long) c_form->key.length);
2688 sess->zsk_flag = zsk_flag;
2690 sess->aes_ctr = aes_ctr;
2691 sess->iv_offset = c_form->iv.offset;
2692 sess->iv_length = c_form->iv.length;
2693 sess->is_null = is_null;
2695 if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
2696 c_form->key.data, c_form->key.length, NULL)))
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from the end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;

	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
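
/*
 * fill_sess_gmac - setup session for AES-GMAC.
 * @param xform: auth transform carrying the GMAC key, IV info and digest
 *		 length.
 * @param sess: session private data to be populated.
 *
 * GMAC is handled by the microcode as a GCM cipher context plus a GMAC
 * hash type, so both the cipher and the auth key setup paths are used.
 */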
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
			a_form->digest_length)))
		return -1;

	return 0;
}
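
/*
 * alloc_op_meta - allocate meta buffer for a request.
 * @param m_src: source mbuf; when it is a single segment with sufficient
 *		 tailroom, the meta data is carved out of that tailroom and
 *		 the returned pointer is tagged by setting bit 0.
 * @param buf: buf_ptr_t filled with the vaddr/dma_addr/size of the meta.
 * @param len: length of meta data needed.
 * @param cpt_meta_pool: mempool used when the mbuf cannot hold the meta.
 */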
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_iova + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif
	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
/*
 * free_op_meta - free meta buffer back to the mempool.
 * @param mdata: pointer to the meta buffer; bit 0 set means it was carved
 *		 out of the mbuf tailroom and must not be freed.
 * @param cpt_meta_pool: mempool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
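
/*
 * prepare_iov_from_pkt - build a gather list from an mbuf chain.
 * @param pkt: source mbuf chain.
 * @param iovec: iov_ptr_t filled with one entry per segment.
 * @param start_offset: byte offset into the chain where the list starts.
 *
 * Returns 0 on success.
 */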
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
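
/*
 * prepare_iov_from_pkt_inplace - build source descriptors for in-place
 * processing of an mbuf.
 * @param pkt: mbuf to be processed in place.
 * @param param: fc_params_t whose bufs[0] (single segment) or src_iov
 *		 (multi segment) is filled in.
 * @param flags: updated with SINGLE_BUF_INPLACE for a single segment and,
 *		 when head/tailroom are sufficient, SINGLE_BUF_HEADTAILROOM.
 */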
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_iova(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
		    (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_iova(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
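
/*
 * fill_fc_params - prepare a flexi-crypto request from a crypto op.
 * @param cop: crypto operation to be processed.
 * @param sess_misc: session private data.
 * @param m_info: per-queue meta buffer info (pool and meta lengths).
 * @param mdata_ptr: updated with the allocated meta buffer on success.
 * @param prep_req: updated with the prepared microcode request.
 *
 * Packs cipher/auth (or AEAD) offsets and lengths into d_offs/d_lens,
 * sets up IV, AAD, MAC and gather lists, allocates the meta buffer and
 * invokes the enc/dec prep routine. Returns 0 on success, negative errno
 * otherwise.
 */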
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the API for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
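
/*
 * compl_auth_verify - compare the generated MAC against the expected one
 * and set the crypto op status accordingly.
 * @param op: completed crypto operation.
 * @param gen_mac: MAC generated by the hardware.
 * @param mac_len: length of the MAC in bytes.
 */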
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
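
/*
 * find_kasumif9_direction_and_length - recover the KASUMI F9 direction bit
 * and the message length (in bits) from the padded source buffer.
 * @param src: source buffer including the F9 trailer.
 * @param counter_num_bytes: number of bytes to scan back from the end.
 * @param addr_length_in_bits: updated with the length in bits.
 * @param addr_direction: updated with the direction bit.
 *
 * Scans backwards for the final '1' padding bit; the bit immediately
 * preceding it in the stream is the direction bit.
 */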
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
						       - 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
					       + (8 - (pos + 2));
		}
		found = 1;
	}
}
/*
 * This handles all auth-only cases except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uintptr_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest, let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_iova_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the API for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */