1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines functions that serve as interfaces to the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
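/*
 * Annotation (added for clarity): these 32 bytes appear to be the sixteen
 * 15-bit "D" constants from the ZUC specification (D0 = 0x44D7,
 * D1 = 0x26BC, ...), packed two bytes each. They are copied verbatim into
 * zs_ctx.zuc_const whenever a ZUC EEA3/EIA3 key is installed below.
 */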
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
29 * Microcode only supports the following combinations:
30 * Encryption followed by authentication
31 * Authentication followed by decryption
34 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 /* Unsupported as of now by microcode */
38 CPT_LOG_DP_ERR("Unsupported combination");
41 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 /* For GMAC auth there is no cipher operation */
45 if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
46 xform->next->auth.algo !=
47 RTE_CRYPTO_AUTH_AES_GMAC) {
48 /* Unsupported as of now by microcode */
49 CPT_LOG_DP_ERR("Unsupported combination");
57 static __rte_always_inline void
58 gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
62 for (i = 0; i < 4; i++) {
64 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 (ck[base + 2] << 8) | (ck[base + 3]);
66 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
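/*
 * Usage sketch (illustrative, not part of the original source): the key
 * schedule packs a 16-byte confidentiality key CK into four 32-bit words
 * in reversed word order, each word built big-endian from the key bytes.
 * Assuming a 16-byte key:
 *
 *	uint8_t ck[16] = { 0 };
 *	uint32_t keyx[4];
 *
 *	gen_key_snow3g(ck, keyx);
 *	// keyx[3] holds CK[0..3], keyx[2] CK[4..7], ... keyx[0] CK[12..15]
 */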
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
74 struct cpt_ctx *cpt_ctx = ctx;
75 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
91 static __rte_always_inline int
92 cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
109 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
114 key_len = key_len / 2;
115 if (unlikely(key_len == CPT_BYTE_24)) {
116 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
119 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
125 if (unlikely(key_len != 16))
127 /* No support for AEAD yet */
128 if (unlikely(cpt_ctx->hash_type))
130 fc_type = ZUC_SNOW3G;
134 if (unlikely(key_len != 16))
136 /* No support for AEAD yet */
137 if (unlikely(cpt_ctx->hash_type))
147 static __rte_always_inline void
148 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
150 cpt_ctx->enc_cipher = 0;
151 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
154 static __rte_always_inline void
155 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
157 mc_aes_type_t aes_key_type = 0;
160 aes_key_type = AES_128_BIT;
163 aes_key_type = AES_192_BIT;
166 aes_key_type = AES_256_BIT;
169 /* This should not happen */
170 CPT_LOG_DP_ERR("Invalid AES key len");
173 CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
176 static __rte_always_inline void
177 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
182 gen_key_snow3g(key, keyx);
183 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
184 cpt_ctx->fc_type = ZUC_SNOW3G;
185 cpt_ctx->zsk_flags = 0;
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
193 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 cpt_ctx->fc_type = ZUC_SNOW3G;
196 cpt_ctx->zsk_flags = 0;
199 static __rte_always_inline void
200 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
204 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
205 cpt_ctx->zsk_flags = 0;
206 cpt_ctx->fc_type = KASUMI;
209 static __rte_always_inline void
210 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
213 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
214 cpt_ctx->zsk_flags = 0;
215 cpt_ctx->fc_type = KASUMI;
218 static __rte_always_inline int
219 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
220 uint16_t key_len, uint8_t *salt)
222 struct cpt_ctx *cpt_ctx = ctx;
223 mc_fc_context_t *fctx = &cpt_ctx->fctx;
224 uint64_t *ctrl_flags = NULL;
227 /* Validate key before proceeding */
228 fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
229 if (unlikely(fc_type == -1))
232 if (fc_type == FC_GEN) {
233 cpt_ctx->fc_type = FC_GEN;
234 ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
235 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
237 * We need to always say the IV is from DPTR, as the user can
238 * override the IV per operation.
240 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
245 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
248 /* CPT performs DES using 3DES with the 8B DES-key
249 * replicated 2 more times to match the 24B 3DES-key.
250 * E.g., if the original key is "0x0a 0x0b", then the new key is
251 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
254 /* Skipping the first 8B as it will be copied
255 * in the regular code flow
257 memcpy(fctx->enc.encr_key+key_len, key, key_len);
258 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
262 /* For DES3_ECB the IV needs to come from CTX. */
263 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
269 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
272 /* Even though the IV source is DPTR,
273 * the AES-GCM salt is taken from CTX
276 memcpy(fctx->enc.encr_iv, salt, 4);
277 /* Assuming it was just salt update
283 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
286 key_len = key_len / 2;
287 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
289 /* Copy key2 for XTS into ipad */
290 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
291 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
294 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
297 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
300 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
303 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
309 /* Only for FC_GEN case */
311 /* For GMAC auth, cipher must be NULL */
312 if (cpt_ctx->hash_type != GMAC_TYPE)
313 CPT_P_ENC_CTRL(fctx).enc_cipher = type;
315 memcpy(fctx->enc.encr_key, key, key_len);
318 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
321 cpt_ctx->enc_cipher = type;
326 static __rte_always_inline uint32_t
327 fill_sg_comp(sg_comp_t *list,
329 phys_addr_t dma_addr,
332 sg_comp_t *to = &list[i>>2];
334 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
335 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
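/*
 * Annotation (added for clarity): each sg_comp_t packs up to four
 * {len, ptr} pairs, so entry i lands in component i >> 2, slot i % 4,
 * with both fields stored big-endian as the microcode expects. A
 * hypothetical call sequence:
 *
 *	uint32_t i = 0;
 *	i = fill_sg_comp(list, i, buf0_dma, buf0_len); // comp 0, slot 0
 *	i = fill_sg_comp(list, i, buf1_dma, buf1_len); // comp 0, slot 1
 */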
340 static __rte_always_inline uint32_t
341 fill_sg_comp_from_buf(sg_comp_t *list,
345 sg_comp_t *to = &list[i>>2];
347 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
348 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
353 static __rte_always_inline uint32_t
354 fill_sg_comp_from_buf_min(sg_comp_t *list,
359 sg_comp_t *to = &list[i >> 2];
360 uint32_t size = *psize;
363 e_len = (size > from->size) ? from->size : size;
364 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
365 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
372 * This fills the MC expected SGIO list
373 * from IOV given by user.
375 static __rte_always_inline uint32_t
376 fill_sg_comp_from_iov(sg_comp_t *list,
378 iov_ptr_t *from, uint32_t from_offset,
379 uint32_t *psize, buf_ptr_t *extra_buf,
380 uint32_t extra_offset)
383 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
384 uint32_t size = *psize - extra_len;
388 for (j = 0; (j < from->buf_cnt) && size; j++) {
389 phys_addr_t e_dma_addr;
391 sg_comp_t *to = &list[i >> 2];
396 if (unlikely(from_offset)) {
397 if (from_offset >= bufs[j].size) {
398 from_offset -= bufs[j].size;
401 e_dma_addr = bufs[j].dma_addr + from_offset;
402 e_len = (size > (bufs[j].size - from_offset)) ?
403 (bufs[j].size - from_offset) : size;
406 e_dma_addr = bufs[j].dma_addr;
407 e_len = (size > bufs[j].size) ?
411 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
412 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
414 if (extra_len && (e_len >= extra_offset)) {
415 /* Break the data at given offset */
416 uint32_t next_len = e_len - extra_offset;
417 phys_addr_t next_dma = e_dma_addr + extra_offset;
422 e_len = extra_offset;
424 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
427 /* Insert extra data ptr */
432 rte_cpu_to_be_16(extra_buf->size);
434 rte_cpu_to_be_64(extra_buf->dma_addr);
436 /* size already decremented by extra len */
439 /* insert the rest of the data */
443 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
444 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
453 extra_offset -= size;
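/*
 * Summary (added for clarity): this routine walks the caller's IOV,
 * first skipping from_offset bytes, and when extra_buf is supplied it
 * splits the component crossing extra_offset so the extra buffer (e.g.
 * AAD) is spliced in between the two halves. On return, *psize holds the
 * bytes that could not be placed; callers treat a non-zero value as an
 * insufficient-buffer error.
 */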
461 static __rte_always_inline void
462 cpt_digest_gen_prep(uint32_t flags,
464 digest_params_t *params,
468 struct cpt_request_info *req;
471 uint16_t data_len, mac_len, key_len;
472 auth_type_t hash_type;
475 sg_comp_t *gather_comp;
476 sg_comp_t *scatter_comp;
478 uint32_t g_size_bytes, s_size_bytes;
479 uint64_t dptr_dma, rptr_dma;
480 vq_cmd_word0_t vq_cmd_w0;
481 vq_cmd_word3_t vq_cmd_w3;
482 void *c_vaddr, *m_vaddr;
483 uint64_t c_dma, m_dma;
484 opcode_info_t opcode;
486 ctx = params->ctx_buf.vaddr;
487 meta_p = ¶ms->meta_buf;
489 m_vaddr = meta_p->vaddr;
490 m_dma = meta_p->dma_addr;
491 m_size = meta_p->size;
494 * Save the initial space that followed the app data so that the completion
495 * code and alternate completion code fall in the same cache line as the app data
497 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
498 m_dma += COMPLETION_CODE_SIZE;
499 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
501 c_vaddr = (uint8_t *)m_vaddr + size;
502 c_dma = m_dma + size;
503 size += sizeof(cpt_res_s_t);
505 m_vaddr = (uint8_t *)m_vaddr + size;
511 size = sizeof(struct cpt_request_info);
512 m_vaddr = (uint8_t *)m_vaddr + size;
516 hash_type = ctx->hash_type;
517 mac_len = ctx->mac_len;
518 key_len = ctx->auth_key_len;
519 data_len = AUTH_DLEN(d_lens);
523 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
525 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
526 vq_cmd_w0.s.param1 = key_len;
527 vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
529 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
530 vq_cmd_w0.s.param1 = 0;
531 vq_cmd_w0.s.dlen = data_len;
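/*
 * Annotation (added for clarity): in HMAC mode the gather list built
 * below carries the auth key first, padded up to an 8-byte multiple,
 * followed by the data; hence dlen = data_len + ROUNDUP8(key_len) above,
 * whereas the plain hash opcode passes the data alone.
 */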
536 /* Only the NULL auth case (no cipher) enters this branch */
537 if (unlikely(!hash_type && !ctx->enc_cipher)) {
538 opcode.s.major = CPT_MAJOR_OP_MISC;
539 /* Minor op is passthrough */
540 opcode.s.minor = 0x03;
541 /* Send out completion code only */
542 vq_cmd_w0.s.param2 = 0x1;
545 vq_cmd_w0.s.opcode = opcode.flags;
547 /* DPTR has SG list */
551 ((uint16_t *)in_buffer)[0] = 0;
552 ((uint16_t *)in_buffer)[1] = 0;
554 /* TODO Add error check if space will be sufficient */
555 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
564 uint64_t k_dma = params->ctx_buf.dma_addr +
565 offsetof(struct cpt_ctx, auth_key);
567 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
573 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
575 if (unlikely(size)) {
576 CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
582 * Looks like we need to support zero data
583 * gather ptr in case of hash & hmac
587 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
588 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
595 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
597 if (flags & VALID_MAC_BUF) {
598 if (unlikely(params->mac_buf.size < mac_len)) {
599 CPT_LOG_DP_ERR("Insufficient MAC size");
604 i = fill_sg_comp_from_buf_min(scatter_comp, i,
605 ¶ms->mac_buf, &size);
608 i = fill_sg_comp_from_iov(scatter_comp, i,
609 params->src_iov, data_len,
611 if (unlikely(size)) {
612 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
618 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
619 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
621 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
623 /* This is the DPTR len in case of SG mode */
624 vq_cmd_w0.s.dlen = size;
626 m_vaddr = (uint8_t *)m_vaddr + size;
630 /* cpt alternate completion address saved earlier */
631 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
632 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
633 rptr_dma = c_dma - 8;
635 req->ist.ei1 = dptr_dma;
636 req->ist.ei2 = rptr_dma;
641 /* 16 byte aligned cpt res address */
642 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
643 *req->completion_addr = COMPLETION_CODE_INIT;
644 req->comp_baddr = c_dma;
646 /* Fill microcode part of instruction */
647 req->ist.ei0 = vq_cmd_w0.u64;
648 req->ist.ei3 = vq_cmd_w3.u64;
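/*
 * Annotation (added for clarity): as in every prep routine in this file,
 * the instruction words map as ei0 = VQ command word 0 (opcode, params,
 * dlen), ei1 = DPTR (gather list), ei2 = RPTR (scatter list) and
 * ei3 = VQ command word 3 (engine group + context pointer).
 */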
656 static __rte_always_inline void
657 cpt_enc_hmac_prep(uint32_t flags,
660 fc_params_t *fc_params,
664 uint32_t iv_offset = 0;
665 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
666 struct cpt_ctx *cpt_ctx;
667 uint32_t cipher_type, hash_type;
668 uint32_t mac_len, size;
670 struct cpt_request_info *req;
671 buf_ptr_t *meta_p, *aad_buf = NULL;
672 uint32_t encr_offset, auth_offset;
673 uint32_t encr_data_len, auth_data_len, aad_len = 0;
674 uint32_t passthrough_len = 0;
675 void *m_vaddr, *offset_vaddr;
676 uint64_t m_dma, offset_dma, ctx_dma;
677 vq_cmd_word0_t vq_cmd_w0;
678 vq_cmd_word3_t vq_cmd_w3;
682 opcode_info_t opcode;
684 meta_p = &fc_params->meta_buf;
685 m_vaddr = meta_p->vaddr;
686 m_dma = meta_p->dma_addr;
687 m_size = meta_p->size;
689 encr_offset = ENCR_OFFSET(d_offs);
690 auth_offset = AUTH_OFFSET(d_offs);
691 encr_data_len = ENCR_DLEN(d_lens);
692 auth_data_len = AUTH_DLEN(d_lens);
693 if (unlikely(flags & VALID_AAD_BUF)) {
695 * We don't support both AAD
696 * and auth data separately
700 aad_len = fc_params->aad_buf.size;
701 aad_buf = &fc_params->aad_buf;
703 cpt_ctx = fc_params->ctx_buf.vaddr;
704 cipher_type = cpt_ctx->enc_cipher;
705 hash_type = cpt_ctx->hash_type;
706 mac_len = cpt_ctx->mac_len;
709 * Save the initial space that followed the app data so that the completion
710 * code and alternate completion code fall in the same cache line as the app data
712 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
713 m_dma += COMPLETION_CODE_SIZE;
714 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
717 c_vaddr = (uint8_t *)m_vaddr + size;
718 c_dma = m_dma + size;
719 size += sizeof(cpt_res_s_t);
721 m_vaddr = (uint8_t *)m_vaddr + size;
725 /* start cpt request info struct at 8 byte boundary */
726 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
729 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
731 size += sizeof(struct cpt_request_info);
732 m_vaddr = (uint8_t *)m_vaddr + size;
736 if (hash_type == GMAC_TYPE)
739 if (unlikely(!(flags & VALID_IV_BUF))) {
741 iv_offset = ENCR_IV_OFFSET(d_offs);
744 if (unlikely(flags & VALID_AAD_BUF)) {
746 * When AAD is given, data above encr_offset is passed through.
747 * Since AAD is given as a separate pointer and not as an offset,
748 * this is a special case: we need to fragment the input data
749 * into passthrough + encr_data and then insert the AAD in between.
751 if (hash_type != GMAC_TYPE) {
752 passthrough_len = encr_offset;
753 auth_offset = passthrough_len + iv_len;
754 encr_offset = passthrough_len + aad_len + iv_len;
755 auth_data_len = aad_len + encr_data_len;
757 passthrough_len = 16 + aad_len;
758 auth_offset = passthrough_len + iv_len;
759 auth_data_len = aad_len;
762 encr_offset += iv_len;
763 auth_offset += iv_len;
767 opcode.s.major = CPT_MAJOR_OP_FC;
770 auth_dlen = auth_offset + auth_data_len;
771 enc_dlen = encr_data_len + encr_offset;
772 if (unlikely(encr_data_len & 0xf)) {
773 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
774 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
775 else if (likely((cipher_type == AES_CBC) ||
776 (cipher_type == AES_ECB)))
777 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
780 if (unlikely(hash_type == GMAC_TYPE)) {
781 encr_offset = auth_dlen;
785 if (unlikely(auth_dlen > enc_dlen)) {
786 inputlen = auth_dlen;
787 outputlen = auth_dlen + mac_len;
790 outputlen = enc_dlen + mac_len;
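/*
 * Annotation (added for clarity): the instruction must span whichever
 * region reaches further; when the authenticated region outruns the
 * encrypted one, the lengths are based on auth_dlen, and the MAC is
 * always appended after the output data.
 */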
795 vq_cmd_w0.s.param1 = encr_data_len;
796 vq_cmd_w0.s.param2 = auth_data_len;
798 * In 83XX we have the limitation that the IV and offset control
799 * word cannot be part of the instruction and must instead be part
800 * of the data buffer, so we check whether head room is available
801 * and only then do the direct-mode processing
803 if (likely((flags & SINGLE_BUF_INPLACE) &&
804 (flags & SINGLE_BUF_HEADTAILROOM))) {
805 void *dm_vaddr = fc_params->bufs[0].vaddr;
806 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
808 * This flag indicates that 24 bytes of head room and
809 * 8 bytes of tail room are available, so we get to do
810 * DIRECT MODE within that limitation
813 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
814 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
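/*
 * Layout note (added for clarity): direct mode reuses the head room in
 * front of the data as [offset ctrl word | IV], i.e. OFF_CTRL_LEN +
 * iv_len (8 + 16 bytes here, matching the 24B head room above), so DPTR
 * points at data - OFF_CTRL_LEN - iv_len, while RPTR excludes only the
 * control word and therefore still covers the IV.
 */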
817 req->ist.ei1 = offset_dma;
818 /* RPTR should just exclude offset control word */
819 req->ist.ei2 = dm_dma_addr - iv_len;
820 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
821 + outputlen - iv_len);
823 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
825 vq_cmd_w0.s.opcode = opcode.flags;
827 if (likely(iv_len)) {
828 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
830 uint64_t *src = fc_params->iv_buf;
835 *(uint64_t *)offset_vaddr =
836 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
837 ((uint64_t)iv_offset << 8) |
838 ((uint64_t)auth_offset));
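/*
 * Encoding sketch (annotation, not in the original source): the 8-byte
 * offset control word is stored big-endian with encr_offset at bits 16
 * and up, iv_offset at bits 8..15 and auth_offset at bits 0..7. E.g.
 * encr_offset = 24, iv_offset = 16, auth_offset = 0 would be written as
 * rte_cpu_to_be_64((24UL << 16) | (16UL << 8)).
 */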
841 uint32_t i, g_size_bytes, s_size_bytes;
842 uint64_t dptr_dma, rptr_dma;
843 sg_comp_t *gather_comp;
844 sg_comp_t *scatter_comp;
847 /* This falls under strict SG mode */
848 offset_vaddr = m_vaddr;
850 size = OFF_CTRL_LEN + iv_len;
852 m_vaddr = (uint8_t *)m_vaddr + size;
856 opcode.s.major |= CPT_DMA_MODE;
858 vq_cmd_w0.s.opcode = opcode.flags;
860 if (likely(iv_len)) {
861 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
863 uint64_t *src = fc_params->iv_buf;
868 *(uint64_t *)offset_vaddr =
869 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
870 ((uint64_t)iv_offset << 8) |
871 ((uint64_t)auth_offset));
873 /* DPTR has SG list */
877 ((uint16_t *)in_buffer)[0] = 0;
878 ((uint16_t *)in_buffer)[1] = 0;
880 /* TODO Add error check if space will be sufficient */
881 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
889 /* Offset control word that includes iv */
890 i = fill_sg_comp(gather_comp, i, offset_dma,
891 OFF_CTRL_LEN + iv_len);
894 size = inputlen - iv_len;
896 uint32_t aad_offset = aad_len ? passthrough_len : 0;
898 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
899 i = fill_sg_comp_from_buf_min(gather_comp, i,
903 i = fill_sg_comp_from_iov(gather_comp, i,
906 aad_buf, aad_offset);
909 if (unlikely(size)) {
910 CPT_LOG_DP_ERR("Insufficient buffer space,"
911 " size %d needed", size);
915 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
916 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
919 * Output Scatter list
923 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
926 if (likely(iv_len)) {
927 i = fill_sg_comp(scatter_comp, i,
928 offset_dma + OFF_CTRL_LEN,
932 /* output data or output data + digest */
933 if (unlikely(flags & VALID_MAC_BUF)) {
934 size = outputlen - iv_len - mac_len;
936 uint32_t aad_offset =
937 aad_len ? passthrough_len : 0;
939 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
940 i = fill_sg_comp_from_buf_min(
946 i = fill_sg_comp_from_iov(scatter_comp,
954 if (unlikely(size)) {
955 CPT_LOG_DP_ERR("Insufficient buffer"
956 " space, size %d needed",
963 i = fill_sg_comp_from_buf(scatter_comp, i,
964 &fc_params->mac_buf);
967 /* Output including mac */
968 size = outputlen - iv_len;
970 uint32_t aad_offset =
971 aad_len ? passthrough_len : 0;
973 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
974 i = fill_sg_comp_from_buf_min(
980 i = fill_sg_comp_from_iov(scatter_comp,
988 if (unlikely(size)) {
989 CPT_LOG_DP_ERR("Insufficient buffer"
990 " space, size %d needed",
996 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
997 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
999 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1001 /* This is the DPTR len in case of SG mode */
1002 vq_cmd_w0.s.dlen = size;
1004 m_vaddr = (uint8_t *)m_vaddr + size;
1008 /* cpt alternate completion address saved earlier */
1009 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1010 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1011 rptr_dma = c_dma - 8;
1013 req->ist.ei1 = dptr_dma;
1014 req->ist.ei2 = rptr_dma;
1017 ctx_dma = fc_params->ctx_buf.dma_addr +
1018 offsetof(struct cpt_ctx, fctx);
1021 vq_cmd_w3.s.grp = 0;
1022 vq_cmd_w3.s.cptr = ctx_dma;
1024 /* 16 byte aligned cpt res address */
1025 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1026 *req->completion_addr = COMPLETION_CODE_INIT;
1027 req->comp_baddr = c_dma;
1029 /* Fill microcode part of instruction */
1030 req->ist.ei0 = vq_cmd_w0.u64;
1031 req->ist.ei3 = vq_cmd_w3.u64;
1039 static __rte_always_inline void
1040 cpt_dec_hmac_prep(uint32_t flags,
1043 fc_params_t *fc_params,
1047 uint32_t iv_offset = 0, size;
1048 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1049 struct cpt_ctx *cpt_ctx;
1050 int32_t hash_type, mac_len, m_size;
1051 uint8_t iv_len = 16;
1052 struct cpt_request_info *req;
1053 buf_ptr_t *meta_p, *aad_buf = NULL;
1054 uint32_t encr_offset, auth_offset;
1055 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1056 uint32_t passthrough_len = 0;
1057 void *m_vaddr, *offset_vaddr;
1058 uint64_t m_dma, offset_dma, ctx_dma;
1059 opcode_info_t opcode;
1060 vq_cmd_word0_t vq_cmd_w0;
1061 vq_cmd_word3_t vq_cmd_w3;
1065 meta_p = &fc_params->meta_buf;
1066 m_vaddr = meta_p->vaddr;
1067 m_dma = meta_p->dma_addr;
1068 m_size = meta_p->size;
1070 encr_offset = ENCR_OFFSET(d_offs);
1071 auth_offset = AUTH_OFFSET(d_offs);
1072 encr_data_len = ENCR_DLEN(d_lens);
1073 auth_data_len = AUTH_DLEN(d_lens);
1075 if (unlikely(flags & VALID_AAD_BUF)) {
1077 * We don't support both AAD
1078 * and auth data separately
1082 aad_len = fc_params->aad_buf.size;
1083 aad_buf = &fc_params->aad_buf;
1086 cpt_ctx = fc_params->ctx_buf.vaddr;
1087 hash_type = cpt_ctx->hash_type;
1088 mac_len = cpt_ctx->mac_len;
1090 if (hash_type == GMAC_TYPE)
1093 if (unlikely(!(flags & VALID_IV_BUF))) {
1095 iv_offset = ENCR_IV_OFFSET(d_offs);
1098 if (unlikely(flags & VALID_AAD_BUF)) {
1100 * When AAD is given, data above encr_offset is passed through.
1101 * Since AAD is given as a separate pointer and not as an offset,
1102 * this is a special case: we need to fragment the input data
1103 * into passthrough + encr_data and then insert the AAD in between.
1105 if (hash_type != GMAC_TYPE) {
1106 passthrough_len = encr_offset;
1107 auth_offset = passthrough_len + iv_len;
1108 encr_offset = passthrough_len + aad_len + iv_len;
1109 auth_data_len = aad_len + encr_data_len;
1111 passthrough_len = 16 + aad_len;
1112 auth_offset = passthrough_len + iv_len;
1113 auth_data_len = aad_len;
1116 encr_offset += iv_len;
1117 auth_offset += iv_len;
1121 * Save the initial space that followed the app data so that the completion
1122 * code and alternate completion code fall in the same cache line as the app data
1124 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1125 m_dma += COMPLETION_CODE_SIZE;
1126 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1128 c_vaddr = (uint8_t *)m_vaddr + size;
1129 c_dma = m_dma + size;
1130 size += sizeof(cpt_res_s_t);
1132 m_vaddr = (uint8_t *)m_vaddr + size;
1136 /* start cpt request info structure at 8 byte alignment */
1137 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1140 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1142 size += sizeof(struct cpt_request_info);
1143 m_vaddr = (uint8_t *)m_vaddr + size;
1148 opcode.s.major = CPT_MAJOR_OP_FC;
1151 enc_dlen = encr_offset + encr_data_len;
1152 auth_dlen = auth_offset + auth_data_len;
1154 if (auth_dlen > enc_dlen) {
1155 inputlen = auth_dlen + mac_len;
1156 outputlen = auth_dlen;
1158 inputlen = enc_dlen + mac_len;
1159 outputlen = enc_dlen;
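/*
 * Annotation (added for clarity): on the decrypt side the MAC travels
 * with the input (inputlen includes mac_len) so the microcode can verify
 * it, while outputlen is the recovered data without the digest.
 */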
1162 if (hash_type == GMAC_TYPE)
1163 encr_offset = inputlen;
1166 vq_cmd_w0.s.param1 = encr_data_len;
1167 vq_cmd_w0.s.param2 = auth_data_len;
1170 * In 83XX we have the limitation that the IV and offset control
1171 * word cannot be part of the instruction and must instead be part
1172 * of the data buffer, so we check whether head room is available
1173 * and only then do the direct-mode processing
1175 if (likely((flags & SINGLE_BUF_INPLACE) &&
1176 (flags & SINGLE_BUF_HEADTAILROOM))) {
1177 void *dm_vaddr = fc_params->bufs[0].vaddr;
1178 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1180 * This flag indicates that 24 bytes of head room and
1181 * 8 bytes of tail room are available, so we get to do
1182 * DIRECT MODE within that limitation
1185 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1186 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1187 req->ist.ei1 = offset_dma;
1189 /* RPTR should just exclude offset control word */
1190 req->ist.ei2 = dm_dma_addr - iv_len;
1192 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1193 outputlen - iv_len);
1194 /* Since this is decryption,
1195 * don't touch the content of the
1196 * alternate completion-code space as it contains
1200 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1202 vq_cmd_w0.s.opcode = opcode.flags;
1204 if (likely(iv_len)) {
1205 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1207 uint64_t *src = fc_params->iv_buf;
1212 *(uint64_t *)offset_vaddr =
1213 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1214 ((uint64_t)iv_offset << 8) |
1215 ((uint64_t)auth_offset));
1218 uint64_t dptr_dma, rptr_dma;
1219 uint32_t g_size_bytes, s_size_bytes;
1220 sg_comp_t *gather_comp;
1221 sg_comp_t *scatter_comp;
1225 /* This falls under strict SG mode */
1226 offset_vaddr = m_vaddr;
1228 size = OFF_CTRL_LEN + iv_len;
1230 m_vaddr = (uint8_t *)m_vaddr + size;
1234 opcode.s.major |= CPT_DMA_MODE;
1236 vq_cmd_w0.s.opcode = opcode.flags;
1238 if (likely(iv_len)) {
1239 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1241 uint64_t *src = fc_params->iv_buf;
1246 *(uint64_t *)offset_vaddr =
1247 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1248 ((uint64_t)iv_offset << 8) |
1249 ((uint64_t)auth_offset));
1251 /* DPTR has SG list */
1252 in_buffer = m_vaddr;
1255 ((uint16_t *)in_buffer)[0] = 0;
1256 ((uint16_t *)in_buffer)[1] = 0;
1258 /* TODO Add error check if space will be sufficient */
1259 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1266 /* Offset control word that includes iv */
1267 i = fill_sg_comp(gather_comp, i, offset_dma,
1268 OFF_CTRL_LEN + iv_len);
1270 /* Add input data */
1271 if (flags & VALID_MAC_BUF) {
1272 size = inputlen - iv_len - mac_len;
1274 /* input data only */
1275 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1276 i = fill_sg_comp_from_buf_min(
1281 uint32_t aad_offset = aad_len ?
1282 passthrough_len : 0;
1284 i = fill_sg_comp_from_iov(gather_comp,
1291 if (unlikely(size)) {
1292 CPT_LOG_DP_ERR("Insufficient buffer"
1293 " space, size %d needed",
1301 i = fill_sg_comp_from_buf(gather_comp, i,
1302 &fc_params->mac_buf);
1305 /* input data + mac */
1306 size = inputlen - iv_len;
1308 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1309 i = fill_sg_comp_from_buf_min(
1314 uint32_t aad_offset = aad_len ?
1315 passthrough_len : 0;
1317 if (unlikely(!fc_params->src_iov)) {
1318 CPT_LOG_DP_ERR("Bad input args");
1322 i = fill_sg_comp_from_iov(
1330 if (unlikely(size)) {
1331 CPT_LOG_DP_ERR("Insufficient buffer"
1332 " space, size %d needed",
1338 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1339 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1342 * Output Scatter List
1347 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1351 i = fill_sg_comp(scatter_comp, i,
1352 offset_dma + OFF_CTRL_LEN,
1356 /* Add output data */
1357 size = outputlen - iv_len;
1359 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1360 /* handle single buffer here */
1361 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1365 uint32_t aad_offset = aad_len ?
1366 passthrough_len : 0;
1368 if (unlikely(!fc_params->dst_iov)) {
1369 CPT_LOG_DP_ERR("Bad input args");
1373 i = fill_sg_comp_from_iov(scatter_comp, i,
1374 fc_params->dst_iov, 0,
1379 if (unlikely(size)) {
1380 CPT_LOG_DP_ERR("Insufficient buffer space,"
1381 " size %d needed", size);
1386 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1387 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1389 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1391 /* This is the DPTR len in case of SG mode */
1392 vq_cmd_w0.s.dlen = size;
1394 m_vaddr = (uint8_t *)m_vaddr + size;
1398 /* cpt alternate completion address saved earlier */
1399 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1400 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1401 rptr_dma = c_dma - 8;
1402 size += COMPLETION_CODE_SIZE;
1404 req->ist.ei1 = dptr_dma;
1405 req->ist.ei2 = rptr_dma;
1408 ctx_dma = fc_params->ctx_buf.dma_addr +
1409 offsetof(struct cpt_ctx, fctx);
1412 vq_cmd_w3.s.grp = 0;
1413 vq_cmd_w3.s.cptr = ctx_dma;
1415 /* 16 byte aligned cpt res address */
1416 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1417 *req->completion_addr = COMPLETION_CODE_INIT;
1418 req->comp_baddr = c_dma;
1420 /* Fill microcode part of instruction */
1421 req->ist.ei0 = vq_cmd_w0.u64;
1422 req->ist.ei3 = vq_cmd_w3.u64;
1430 static __rte_always_inline void
1431 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1434 fc_params_t *params,
1439 int32_t inputlen, outputlen;
1440 struct cpt_ctx *cpt_ctx;
1441 uint32_t mac_len = 0;
1443 struct cpt_request_info *req;
1445 uint32_t encr_offset = 0, auth_offset = 0;
1446 uint32_t encr_data_len = 0, auth_data_len = 0;
1447 int flags, iv_len = 16, m_size;
1448 void *m_vaddr, *c_vaddr;
1449 uint64_t m_dma, c_dma, offset_ctrl;
1450 uint64_t *offset_vaddr, offset_dma;
1451 uint32_t *iv_s, iv[4];
1452 vq_cmd_word0_t vq_cmd_w0;
1453 vq_cmd_word3_t vq_cmd_w3;
1454 opcode_info_t opcode;
1456 buf_p = ¶ms->meta_buf;
1457 m_vaddr = buf_p->vaddr;
1458 m_dma = buf_p->dma_addr;
1459 m_size = buf_p->size;
1461 cpt_ctx = params->ctx_buf.vaddr;
1462 flags = cpt_ctx->zsk_flags;
1463 mac_len = cpt_ctx->mac_len;
1464 snow3g = cpt_ctx->snow3g;
1467 * Save the initial space that followed the app data so that the completion
1468 * code and alternate completion code fall in the same cache line as the app data
1470 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1471 m_dma += COMPLETION_CODE_SIZE;
1472 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1475 c_vaddr = (uint8_t *)m_vaddr + size;
1476 c_dma = m_dma + size;
1477 size += sizeof(cpt_res_s_t);
1479 m_vaddr = (uint8_t *)m_vaddr + size;
1483 /* Reserve memory for cpt request info */
1486 size = sizeof(struct cpt_request_info);
1487 m_vaddr = (uint8_t *)m_vaddr + size;
1491 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1493 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1494 opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1495 (0 << 3) | (flags & 0x7));
1499 * Microcode expects offsets in bytes
1500 * TODO: Rounding off
1502 auth_data_len = AUTH_DLEN(d_lens);
1505 auth_offset = AUTH_OFFSET(d_offs);
1506 auth_offset = auth_offset / 8;
1508 /* consider iv len */
1509 auth_offset += iv_len;
1511 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1512 outputlen = mac_len;
1514 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1519 * Microcode expects offsets in bytes
1520 * TODO: Rounding off
1522 encr_data_len = ENCR_DLEN(d_lens);
1524 encr_offset = ENCR_OFFSET(d_offs);
1525 encr_offset = encr_offset / 8;
1526 /* consider iv len */
1527 encr_offset += iv_len;
1529 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1530 outputlen = inputlen;
1532 /* iv offset is 0 */
1533 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
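/*
 * Annotation (added for clarity): for ZUC/SNOW3G the offsets arrive in
 * bits and are converted to bytes here, while the encr/auth data lengths
 * stay bit-granular for the GP op header below; the offset control word
 * itself carries byte offsets.
 */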
1537 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1542 * DPDK seems to provide the IV as IV3 IV2 IV1 IV0
1543 * in big-endian form, while the MC needs it as IV0 IV1 IV2 IV3
1546 for (j = 0; j < 4; j++)
1547 iv[j] = iv_s[3 - j];
1549 /* ZUC doesn't need a swap */
1550 for (j = 0; j < 4; j++)
1555 * GP op header, lengths are expected in bits.
1558 vq_cmd_w0.s.param1 = encr_data_len;
1559 vq_cmd_w0.s.param2 = auth_data_len;
1562 * In 83XX we have the limitation that the IV and offset control
1563 * word cannot be part of the instruction and must instead be part
1564 * of the data buffer, so we check whether head room is available
1565 * and only then do the direct-mode processing
1567 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1568 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1569 void *dm_vaddr = params->bufs[0].vaddr;
1570 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1572 * This flag indicates that 24 bytes of head room and
1573 * 8 bytes of tail room are available, so we get to do
1574 * DIRECT MODE within that limitation
1577 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1578 OFF_CTRL_LEN - iv_len);
1579 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1582 req->ist.ei1 = offset_dma;
1583 /* RPTR should just exclude offset control word */
1584 req->ist.ei2 = dm_dma_addr - iv_len;
1585 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1586 + outputlen - iv_len);
1588 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1590 vq_cmd_w0.s.opcode = opcode.flags;
1592 if (likely(iv_len)) {
1593 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1595 memcpy(iv_d, iv, 16);
1598 *offset_vaddr = offset_ctrl;
1600 uint32_t i, g_size_bytes, s_size_bytes;
1601 uint64_t dptr_dma, rptr_dma;
1602 sg_comp_t *gather_comp;
1603 sg_comp_t *scatter_comp;
1607 /* Save space for the offset control word and iv */
1608 offset_vaddr = m_vaddr;
1611 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1612 m_dma += OFF_CTRL_LEN + iv_len;
1613 m_size -= OFF_CTRL_LEN + iv_len;
1615 opcode.s.major |= CPT_DMA_MODE;
1617 vq_cmd_w0.s.opcode = opcode.flags;
1619 /* DPTR has SG list */
1620 in_buffer = m_vaddr;
1623 ((uint16_t *)in_buffer)[0] = 0;
1624 ((uint16_t *)in_buffer)[1] = 0;
1626 /* TODO Add error check if space will be sufficient */
1627 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1634 /* Offset control word followed by iv */
1636 i = fill_sg_comp(gather_comp, i, offset_dma,
1637 OFF_CTRL_LEN + iv_len);
1639 /* iv offset is 0 */
1640 *offset_vaddr = offset_ctrl;
1642 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1643 memcpy(iv_d, iv, 16);
1646 size = inputlen - iv_len;
1648 i = fill_sg_comp_from_iov(gather_comp, i,
1651 if (unlikely(size)) {
1652 CPT_LOG_DP_ERR("Insufficient buffer space,"
1653 " size %d needed", size);
1657 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1658 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1661 * Output Scatter List
1666 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1669 /* IV in SLIST only for EEA3 & UEA2 */
1674 i = fill_sg_comp(scatter_comp, i,
1675 offset_dma + OFF_CTRL_LEN, iv_len);
1678 /* Add output data */
1679 if (req_flags & VALID_MAC_BUF) {
1680 size = outputlen - iv_len - mac_len;
1682 i = fill_sg_comp_from_iov(scatter_comp, i,
1686 if (unlikely(size)) {
1687 CPT_LOG_DP_ERR("Insufficient buffer space,"
1688 " size %d needed", size);
1695 i = fill_sg_comp_from_buf(scatter_comp, i,
1699 /* Output including mac */
1700 size = outputlen - iv_len;
1702 i = fill_sg_comp_from_iov(scatter_comp, i,
1706 if (unlikely(size)) {
1707 CPT_LOG_DP_ERR("Insufficient buffer space,"
1708 " size %d needed", size);
1713 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1714 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1716 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1718 /* This is the DPTR len in case of SG mode */
1719 vq_cmd_w0.s.dlen = size;
1721 m_vaddr = (uint8_t *)m_vaddr + size;
1725 /* cpt alternate completion address saved earlier */
1726 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1727 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1728 rptr_dma = c_dma - 8;
1730 req->ist.ei1 = dptr_dma;
1731 req->ist.ei2 = rptr_dma;
1736 vq_cmd_w3.s.grp = 0;
1737 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1738 offsetof(struct cpt_ctx, zs_ctx);
1740 /* 16 byte aligned cpt res address */
1741 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1742 *req->completion_addr = COMPLETION_CODE_INIT;
1743 req->comp_baddr = c_dma;
1745 /* Fill microcode part of instruction */
1746 req->ist.ei0 = vq_cmd_w0.u64;
1747 req->ist.ei3 = vq_cmd_w3.u64;
1755 static __rte_always_inline void
1756 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1759 fc_params_t *params,
1764 int32_t inputlen = 0, outputlen;
1765 struct cpt_ctx *cpt_ctx;
1766 uint8_t snow3g, iv_len = 16;
1767 struct cpt_request_info *req;
1769 uint32_t encr_offset;
1770 uint32_t encr_data_len;
1772 void *m_vaddr, *c_vaddr;
1773 uint64_t m_dma, c_dma;
1774 uint64_t *offset_vaddr, offset_dma;
1775 uint32_t *iv_s, iv[4], j;
1776 vq_cmd_word0_t vq_cmd_w0;
1777 vq_cmd_word3_t vq_cmd_w3;
1778 opcode_info_t opcode;
1780 buf_p = ¶ms->meta_buf;
1781 m_vaddr = buf_p->vaddr;
1782 m_dma = buf_p->dma_addr;
1783 m_size = buf_p->size;
1786 * Microcode expects offsets in bytes
1787 * TODO: Rounding off
1789 encr_offset = ENCR_OFFSET(d_offs) / 8;
1790 encr_data_len = ENCR_DLEN(d_lens);
1792 cpt_ctx = params->ctx_buf.vaddr;
1793 flags = cpt_ctx->zsk_flags;
1794 snow3g = cpt_ctx->snow3g;
1796 * Save the initial space that followed the app data so that the completion
1797 * code and alternate completion code fall in the same cache line as the app data
1799 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1800 m_dma += COMPLETION_CODE_SIZE;
1801 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1804 c_vaddr = (uint8_t *)m_vaddr + size;
1805 c_dma = m_dma + size;
1806 size += sizeof(cpt_res_s_t);
1808 m_vaddr = (uint8_t *)m_vaddr + size;
1812 /* Reserve memory for cpt request info */
1815 size = sizeof(struct cpt_request_info);
1816 m_vaddr = (uint8_t *)m_vaddr + size;
1820 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1822 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1823 opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1824 (0 << 3) | (flags & 0x7));
1826 /* consider iv len */
1827 encr_offset += iv_len;
1829 inputlen = encr_offset +
1830 (RTE_ALIGN(encr_data_len, 8) / 8);
1831 outputlen = inputlen;
1834 iv_s = params->iv_buf;
1837 * DPDK seems to provide the IV as IV3 IV2 IV1 IV0
1838 * in big-endian form, while the MC needs it as IV0 IV1 IV2 IV3
1841 for (j = 0; j < 4; j++)
1842 iv[j] = iv_s[3 - j];
1844 /* ZUC doesn't need a swap */
1845 for (j = 0; j < 4; j++)
1850 * GP op header, lengths are expected in bits.
1853 vq_cmd_w0.s.param1 = encr_data_len;
1856 * In 83XX we have the limitation that the IV and offset control
1857 * word cannot be part of the instruction and must instead be part
1858 * of the data buffer, so we check whether head room is available
1859 * and only then do the direct-mode processing
1861 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1862 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1863 void *dm_vaddr = params->bufs[0].vaddr;
1864 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1866 * This flag indicates that 24 bytes of head room and
1867 * 8 bytes of tail room are available, so we get to do
1868 * DIRECT MODE within that limitation
1871 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1872 OFF_CTRL_LEN - iv_len);
1873 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1876 req->ist.ei1 = offset_dma;
1877 /* RPTR should just exclude offset control word */
1878 req->ist.ei2 = dm_dma_addr - iv_len;
1879 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1880 + outputlen - iv_len);
1882 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1884 vq_cmd_w0.s.opcode = opcode.flags;
1886 if (likely(iv_len)) {
1887 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1889 memcpy(iv_d, iv, 16);
1892 /* iv offset is 0 */
1893 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1895 uint32_t i, g_size_bytes, s_size_bytes;
1896 uint64_t dptr_dma, rptr_dma;
1897 sg_comp_t *gather_comp;
1898 sg_comp_t *scatter_comp;
1902 /* Save space for the offset control word and iv */
1903 offset_vaddr = m_vaddr;
1906 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1907 m_dma += OFF_CTRL_LEN + iv_len;
1908 m_size -= OFF_CTRL_LEN + iv_len;
1910 opcode.s.major |= CPT_DMA_MODE;
1912 vq_cmd_w0.s.opcode = opcode.flags;
1914 /* DPTR has SG list */
1915 in_buffer = m_vaddr;
1918 ((uint16_t *)in_buffer)[0] = 0;
1919 ((uint16_t *)in_buffer)[1] = 0;
1921 /* TODO Add error check if space will be sufficient */
1922 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1929 /* Offset control word */
1931 /* iv offset is 0 */
1932 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1934 i = fill_sg_comp(gather_comp, i, offset_dma,
1935 OFF_CTRL_LEN + iv_len);
1937 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1938 memcpy(iv_d, iv, 16);
1940 /* Add input data */
1941 size = inputlen - iv_len;
1943 i = fill_sg_comp_from_iov(gather_comp, i,
1946 if (unlikely(size)) {
1947 CPT_LOG_DP_ERR("Insufficient buffer space,"
1948 " size %d needed", size);
1952 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1953 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1956 * Output Scatter List
1961 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1964 i = fill_sg_comp(scatter_comp, i,
1965 offset_dma + OFF_CTRL_LEN,
1968 /* Add output data */
1969 size = outputlen - iv_len;
1971 i = fill_sg_comp_from_iov(scatter_comp, i,
1975 if (unlikely(size)) {
1976 CPT_LOG_DP_ERR("Insufficient buffer space,"
1977 " size %d needed", size);
1981 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1982 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1984 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1986 /* This is the DPTR len in case of SG mode */
1987 vq_cmd_w0.s.dlen = size;
1989 m_vaddr = (uint8_t *)m_vaddr + size;
1993 /* cpt alternate completion address saved earlier */
1994 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1995 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1996 rptr_dma = c_dma - 8;
1998 req->ist.ei1 = dptr_dma;
1999 req->ist.ei2 = rptr_dma;
2004 vq_cmd_w3.s.grp = 0;
2005 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2006 offsetof(struct cpt_ctx, zs_ctx);
2008 /* 16 byte aligned cpt res address */
2009 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2010 *req->completion_addr = COMPLETION_CODE_INIT;
2011 req->comp_baddr = c_dma;
2013 /* Fill microcode part of instruction */
2014 req->ist.ei0 = vq_cmd_w0.u64;
2015 req->ist.ei3 = vq_cmd_w3.u64;
2023 static __rte_always_inline void
2024 cpt_kasumi_enc_prep(uint32_t req_flags,
2027 fc_params_t *params,
2032 int32_t inputlen = 0, outputlen = 0;
2033 struct cpt_ctx *cpt_ctx;
2034 uint32_t mac_len = 0;
2036 struct cpt_request_info *req;
2038 uint32_t encr_offset, auth_offset;
2039 uint32_t encr_data_len, auth_data_len;
2041 uint8_t *iv_s, *iv_d, iv_len = 8;
2043 void *m_vaddr, *c_vaddr;
2044 uint64_t m_dma, c_dma;
2045 uint64_t *offset_vaddr, offset_dma;
2046 vq_cmd_word0_t vq_cmd_w0;
2047 vq_cmd_word3_t vq_cmd_w3;
2048 opcode_info_t opcode;
2050 uint32_t g_size_bytes, s_size_bytes;
2051 uint64_t dptr_dma, rptr_dma;
2052 sg_comp_t *gather_comp;
2053 sg_comp_t *scatter_comp;
2055 buf_p = ¶ms->meta_buf;
2056 m_vaddr = buf_p->vaddr;
2057 m_dma = buf_p->dma_addr;
2058 m_size = buf_p->size;
2060 encr_offset = ENCR_OFFSET(d_offs) / 8;
2061 auth_offset = AUTH_OFFSET(d_offs) / 8;
2062 encr_data_len = ENCR_DLEN(d_lens);
2063 auth_data_len = AUTH_DLEN(d_lens);
2065 cpt_ctx = params->ctx_buf.vaddr;
2066 flags = cpt_ctx->zsk_flags;
2067 mac_len = cpt_ctx->mac_len;
2070 iv_s = params->iv_buf;
2072 iv_s = params->auth_iv_buf;
2074 dir = iv_s[8] & 0x1;
2077 * Save the initial space that followed the app data so that the completion
2078 * code and alternate completion code fall in the same cache line as the app data
2080 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2081 m_dma += COMPLETION_CODE_SIZE;
2082 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2085 c_vaddr = (uint8_t *)m_vaddr + size;
2086 c_dma = m_dma + size;
2087 size += sizeof(cpt_res_s_t);
2089 m_vaddr = (uint8_t *)m_vaddr + size;
2093 /* Reserve memory for cpt request info */
2096 size = sizeof(struct cpt_request_info);
2097 m_vaddr = (uint8_t *)m_vaddr + size;
2101 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2103 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2104 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2105 (dir << 4) | (0 << 3) | (flags & 0x7));
2108 * GP op header, lengths are expected in bits.
2111 vq_cmd_w0.s.param1 = encr_data_len;
2112 vq_cmd_w0.s.param2 = auth_data_len;
2113 vq_cmd_w0.s.opcode = opcode.flags;
2115 /* consider iv len */
2117 encr_offset += iv_len;
2118 auth_offset += iv_len;
2121 /* save space for offset ctrl and iv */
2122 offset_vaddr = m_vaddr;
2125 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2126 m_dma += OFF_CTRL_LEN + iv_len;
2127 m_size -= OFF_CTRL_LEN + iv_len;
2129 /* DPTR has SG list */
2130 in_buffer = m_vaddr;
2133 ((uint16_t *)in_buffer)[0] = 0;
2134 ((uint16_t *)in_buffer)[1] = 0;
2136 /* TODO Add error check if space will be sufficient */
2137 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2144 /* Offset control word followed by iv */
2147 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2148 outputlen = inputlen;
2149 /* iv offset is 0 */
2150 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2152 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2153 outputlen = mac_len;
2154 /* iv offset is 0 */
2155 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2158 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2161 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2162 memcpy(iv_d, iv_s, iv_len);
2165 size = inputlen - iv_len;
2167 i = fill_sg_comp_from_iov(gather_comp, i,
2171 if (unlikely(size)) {
2172 CPT_LOG_DP_ERR("Insufficient buffer space,"
2173 " size %d needed", size);
2177 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2178 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2181 * Output Scatter List
2185 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2188 /* IV in SLIST only for F8 */
2194 i = fill_sg_comp(scatter_comp, i,
2195 offset_dma + OFF_CTRL_LEN,
2199 /* Add output data */
2200 if (req_flags & VALID_MAC_BUF) {
2201 size = outputlen - iv_len - mac_len;
2203 i = fill_sg_comp_from_iov(scatter_comp, i,
2207 if (unlikely(size)) {
2208 CPT_LOG_DP_ERR("Insufficient buffer space,"
2209 " size %d needed", size);
2216 i = fill_sg_comp_from_buf(scatter_comp, i,
2220 /* Output including mac */
2221 size = outputlen - iv_len;
2223 i = fill_sg_comp_from_iov(scatter_comp, i,
2227 if (unlikely(size)) {
2228 CPT_LOG_DP_ERR("Insufficient buffer space,"
2229 " size %d needed", size);
2234 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2235 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2237 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2239 /* This is the DPTR len in case of SG mode */
2240 vq_cmd_w0.s.dlen = size;
2242 m_vaddr = (uint8_t *)m_vaddr + size;
2246 /* cpt alternate completion address saved earlier */
2247 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2248 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2249 rptr_dma = c_dma - 8;
2251 req->ist.ei1 = dptr_dma;
2252 req->ist.ei2 = rptr_dma;
2256 vq_cmd_w3.s.grp = 0;
2257 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2258 offsetof(struct cpt_ctx, k_ctx);
2260 /* 16 byte aligned cpt res address */
2261 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2262 *req->completion_addr = COMPLETION_CODE_INIT;
2263 req->comp_baddr = c_dma;
2265 /* Fill microcode part of instruction */
2266 req->ist.ei0 = vq_cmd_w0.u64;
2267 req->ist.ei3 = vq_cmd_w3.u64;
2275 static __rte_always_inline void
2276 cpt_kasumi_dec_prep(uint64_t d_offs,
2278 fc_params_t *params,
2283 int32_t inputlen = 0, outputlen;
2284 struct cpt_ctx *cpt_ctx;
2285 uint8_t i = 0, iv_len = 8;
2286 struct cpt_request_info *req;
2288 uint32_t encr_offset;
2289 uint32_t encr_data_len;
2292 void *m_vaddr, *c_vaddr;
2293 uint64_t m_dma, c_dma;
2294 uint64_t *offset_vaddr, offset_dma;
2295 vq_cmd_word0_t vq_cmd_w0;
2296 vq_cmd_word3_t vq_cmd_w3;
2297 opcode_info_t opcode;
2299 uint32_t g_size_bytes, s_size_bytes;
2300 uint64_t dptr_dma, rptr_dma;
2301 sg_comp_t *gather_comp;
2302 sg_comp_t *scatter_comp;
2304 buf_p = ¶ms->meta_buf;
2305 m_vaddr = buf_p->vaddr;
2306 m_dma = buf_p->dma_addr;
2307 m_size = buf_p->size;
2309 encr_offset = ENCR_OFFSET(d_offs) / 8;
2310 encr_data_len = ENCR_DLEN(d_lens);
2312 cpt_ctx = params->ctx_buf.vaddr;
2313 flags = cpt_ctx->zsk_flags;
2315 * Save the initial space that followed the app data so that the completion
2316 * code and alternate completion code fall in the same cache line as the app data
2318 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2319 m_dma += COMPLETION_CODE_SIZE;
2320 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2323 c_vaddr = (uint8_t *)m_vaddr + size;
2324 c_dma = m_dma + size;
2325 size += sizeof(cpt_res_s_t);
2327 m_vaddr = (uint8_t *)m_vaddr + size;
2331 /* Reserve memory for cpt request info */
2334 size = sizeof(struct cpt_request_info);
2335 m_vaddr = (uint8_t *)m_vaddr + size;
2339 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2341 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2342 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2343 (dir << 4) | (0 << 3) | (flags & 0x7));
2346 * GP op header, lengths are expected in bits.
2349 vq_cmd_w0.s.param1 = encr_data_len;
2350 vq_cmd_w0.s.opcode = opcode.flags;
2352 /* consider iv len */
2353 encr_offset += iv_len;
2355 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2356 outputlen = inputlen;
2358 /* save space for offset ctrl & iv */
2359 offset_vaddr = m_vaddr;
2362 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2363 m_dma += OFF_CTRL_LEN + iv_len;
2364 m_size -= OFF_CTRL_LEN + iv_len;
2366 /* DPTR has SG list */
2367 in_buffer = m_vaddr;
2370 ((uint16_t *)in_buffer)[0] = 0;
2371 ((uint16_t *)in_buffer)[1] = 0;
2373 /* TODO Add error check if space will be sufficient */
2374 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2381 /* Offset control word followed by iv */
2382 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2384 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2387 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2388 params->iv_buf, iv_len);
2390 /* Add input data */
2391 size = inputlen - iv_len;
2393 i = fill_sg_comp_from_iov(gather_comp, i,
2396 if (unlikely(size)) {
2397 CPT_LOG_DP_ERR("Insufficient buffer space,"
2398 " size %d needed", size);
2402 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2403 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2406 * Output Scatter List
2410 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2413 i = fill_sg_comp(scatter_comp, i,
2414 offset_dma + OFF_CTRL_LEN,
2417 /* Add output data */
2418 size = outputlen - iv_len;
2420 i = fill_sg_comp_from_iov(scatter_comp, i,
2423 if (unlikely(size)) {
2424 CPT_LOG_DP_ERR("Insufficient buffer space,"
2425 " size %d needed", size);
2429 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2430 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2432 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2434 /* This is the DPTR len in case of SG mode */
2435 vq_cmd_w0.s.dlen = size;
2437 m_vaddr = (uint8_t *)m_vaddr + size;
2441 /* cpt alternate completion address saved earlier */
2442 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2443 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2444 rptr_dma = c_dma - 8;
2446 req->ist.ei1 = dptr_dma;
2447 req->ist.ei2 = rptr_dma;
2451 vq_cmd_w3.s.grp = 0;
2452 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2453 offsetof(struct cpt_ctx, k_ctx);
2455 /* 16 byte aligned cpt res address */
2456 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2457 *req->completion_addr = COMPLETION_CODE_INIT;
2458 req->comp_baddr = c_dma;
2460 /* Fill microcode part of instruction */
2461 req->ist.ei0 = vq_cmd_w0.u64;
2462 req->ist.ei3 = vq_cmd_w3.u64;
2470 static __rte_always_inline void *
2471 cpt_fc_dec_hmac_prep(uint32_t flags,
2474 fc_params_t *fc_params,
2477 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2479 void *prep_req = NULL;
2481 fc_type = ctx->fc_type;
2483 if (likely(fc_type == FC_GEN)) {
2484 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2486 } else if (fc_type == ZUC_SNOW3G) {
2487 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2489 } else if (fc_type == KASUMI) {
2490 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2494 * For the AUTH_ONLY case,
2495 * MC only supports digest generation; verification
2496 * should be done in software via memcmp()
2502 static __rte_always_inline void *__hot
2503 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2504 fc_params_t *fc_params, void *op)
2506 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2508 void *prep_req = NULL;
2510 fc_type = ctx->fc_type;
2512 /* Common API for the rest of the ops */
2513 if (likely(fc_type == FC_GEN)) {
2514 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2516 } else if (fc_type == ZUC_SNOW3G) {
2517 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2519 } else if (fc_type == KASUMI) {
2520 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2522 } else if (fc_type == HASH_HMAC) {
2523 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2529 static __rte_always_inline int
2530 cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
2531 uint16_t key_len, uint16_t mac_len)
2533 struct cpt_ctx *cpt_ctx = ctx;
2534 mc_fc_context_t *fctx = &cpt_ctx->fctx;
2535 uint64_t *ctrl_flags = NULL;
2537 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2542 /* No support for AEAD yet */
2543 if (cpt_ctx->enc_cipher)
2545 /* For ZUC/SNOW3G/Kasumi */
2548 cpt_ctx->snow3g = 1;
2549 gen_key_snow3g(key, keyx);
2550 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2551 cpt_ctx->fc_type = ZUC_SNOW3G;
2552 cpt_ctx->zsk_flags = 0x1;
2555 cpt_ctx->snow3g = 0;
2556 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2557 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2558 cpt_ctx->fc_type = ZUC_SNOW3G;
2559 cpt_ctx->zsk_flags = 0x1;
2562 /* Kasumi ECB mode */
2564 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2565 cpt_ctx->fc_type = KASUMI;
2566 cpt_ctx->zsk_flags = 0x1;
2569 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2570 cpt_ctx->fc_type = KASUMI;
2571 cpt_ctx->zsk_flags = 0x1;
2576 cpt_ctx->mac_len = 4;
2577 cpt_ctx->hash_type = type;
2581 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2582 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2583 cpt_ctx->fc_type = HASH_HMAC;
2586 ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
2587 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
2589 /* For GMAC auth, cipher must be NULL */
2590 if (type == GMAC_TYPE)
2591 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
2593 CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
2594 CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
2598 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2599 memcpy(cpt_ctx->auth_key, key, key_len);
2600 cpt_ctx->auth_key_len = key_len;
2601 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2602 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2603 memcpy(fctx->hmac.opad, key, key_len);
2604 CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
2606 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
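/*
 * Annotation (added for clarity; semantics partly assumed): the raw HMAC
 * key is stored twice: in cpt_ctx->auth_key for the HASH/HMAC opcode
 * path, and in fctx->hmac.opad with auth_input_type = 1, which we
 * understand to tell the microcode to derive ipad/opad from the raw key
 * itself rather than expecting precomputed pads.
 */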
2610 static __rte_always_inline int
2611 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2612 struct cpt_sess_misc *sess)
2614 struct rte_crypto_aead_xform *aead_form;
2615 cipher_type_t enc_type = 0; /* NULL Cipher type */
2616 auth_type_t auth_type = 0; /* NULL Auth type */
2617 uint32_t cipher_key_len = 0;
2618 uint8_t zsk_flag = 0, aes_gcm = 0;
2619 aead_form = &xform->aead;
2622 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2623 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2624 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2625 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2626 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2627 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2628 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2629 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2631 CPT_LOG_DP_ERR("Unknown cipher operation");
2634 switch (aead_form->algo) {
2635 case RTE_CRYPTO_AEAD_AES_GCM:
2637 cipher_key_len = 16;
2640 case RTE_CRYPTO_AEAD_AES_CCM:
2641 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2645 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2649 if (aead_form->key.length < cipher_key_len) {
2650 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2651 (unsigned long)aead_form->key.length);
2654 sess->zsk_flag = zsk_flag;
2655 sess->aes_gcm = aes_gcm;
2656 sess->mac_len = aead_form->digest_length;
2657 sess->iv_offset = aead_form->iv.offset;
2658 sess->iv_length = aead_form->iv.length;
2659 sess->aad_length = aead_form->aad_length;
2660 ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
2662 cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2663 aead_form->key.length, NULL);
2665 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
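/*
 * Minimal usage sketch (hypothetical, for illustration only; "key",
 * IV_OFFSET and "sess" are placeholders): an AES-GCM session is built
 * from a single AEAD xform, with the private context laid out right
 * after the cpt_sess_misc struct:
 *
 *	struct rte_crypto_sym_xform xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 *	fill_sess_aead(&xf, sess);
 */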
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return -1;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
	else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
			    c_form->key.length, NULL);

	return 0;
}
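/*
 * Matching cipher-only xform (sketch; key and IV_OFFSET are caller
 * supplied):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *
 * Note that cipher_key_len above is only a lower bound; exact per-algorithm
 * key length validation happens in cpt_fc_ciph_validate_key().
 */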
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is too big");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;

	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
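/*
 * The zsk_flag recorded here steers the data-path code later on: ZS_EA and
 * ZS_IA mark SNOW3G/ZUC cipher and auth respectively (their offsets are
 * carried in bits), while K_F8/K_F9 mark Kasumi, where F9 additionally
 * recovers the direction bit from the end of the source buffer (see
 * find_kasumif9_direction_and_length() further below).
 */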
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0;
	void *ctx;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined auth algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}
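/*
 * Design note: GMAC is programmed as AES-GCM with a GMAC_TYPE hash;
 * cpt_fc_auth_set_key() clears enc_cipher for GMAC_TYPE, so the microcode
 * authenticates without encrypting.
 */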
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
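/*
 * The low bit of the returned pointer doubles as an ownership tag:
 * metadata carved out of the mbuf tailroom is returned with bit 0 set so
 * that free_op_meta() below can tell it apart from mempool objects (which
 * are at least 8-byte aligned) and skip rte_mempool_put() for it. Sketch:
 *
 *	void *md = alloc_op_meta(m, &buf, len, pool);
 *	...
 *	free_op_meta(md, pool);	// no-op if md came from the tailroom
 */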
/**
 * free_op_meta - free metabuf back to the mempool.
 * @param mdata: pointer to the metabuf (possibly tagged in bit 0).
 * @param cpt_meta_pool: mempool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* First seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
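/*
 * Usage sketch: for a three-segment mbuf chain and start_offset = 0 this
 * yields one iovec entry per segment (buf_cnt = 3); with a start_offset
 * that falls inside segment 2, entry 0 begins mid-segment and the earlier
 * segments are skipped entirely.
 */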
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* 1st seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
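/*
 * The 24-byte headroom / 8-byte tailroom check above is what qualifies a
 * single-segment packet for the hardware's direct (non-SG) mode on 83XX:
 * the engine needs that slack around the payload for in-place processing,
 * and SINGLE_BUF_HEADTAILROOM also selects the smaller lb_mlen metadata
 * length in fill_fc_params() below.
 */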
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
	uint8_t zsk_flag = sess_misc->zsk_flag;
	uint8_t aes_gcm = sess_misc->aes_gcm;
	uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (zsk_flag == K_F9) {
			CPT_LOG_DP_ERR("Should not reach here for "
				       "Kasumi F9");
		}
		if (zsk_flag != ZS_EA)
			inplace = 0;
	}

	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;
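	/*
	 * Below, cipher and auth offsets/lengths are packed into single
	 * 64-bit words for the microcode: cipher values in the upper half,
	 * auth values in the lower half. For example, cipher at offset 16
	 * with auth starting at offset 0 gives
	 * d_offs = (16 << 16) | 0 = 0x100000.
	 */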
	if (aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* For GMAC, salt should be updated as in GCM */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			m_dst = sym_op->m_dst;
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
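/*
 * Request metadata layout used above (and mirrored in fill_digest_params()
 * below): the first four 64-bit words of the meta buffer form a small
 * header, with op[0] holding the (possibly tagged) meta pointer itself,
 * op[1] the rte_crypto_op, and op[2]/op[3] the generated-MAC address and
 * length whenever a verify step must be finished in software (see
 * compl_auth_verify() below). The remaining space is handed to the prep
 * routines as scratch.
 */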
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}
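/*
 * A chained cipher+auth session walks the loop above twice. Sketch
 * (encrypt, then generate the digest):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH, .next = NULL, ... };
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER, .next = &auth, ... };
 *	instance_session_cfg(&cipher, sess_private_data);
 *
 * cpt_is_algo_supported() has already rejected the xform orderings the
 * microcode cannot handle.
 */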
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
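/*
 * Worked example: a Kasumi F9 input ends with a direction bit, a '1' stop
 * bit and zero padding. If the last non-zero byte is 0x40 (0b01000000),
 * rte_bsf32() returns pos = 6, the direction bit is (0x40 >> 7) & 1 = 0,
 * and the length works out to counter_num_bytes * 8 + (8 - 8) bits, i.e.
 * every bit before that byte.
 */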
/*
 * This handles all auth-only cases except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint8_t zsk_flag = sess->zsk_flag;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);
	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi and Snow3g the offsets are in bits,
		 * we send a pass-through even for the auth-only case and let
		 * the microcode handle it.
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;
			uint32_t counter_num_bytes;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is Kasumi F9; take the direction bit from
			 * the end of the source data.
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			counter_num_bytes = num_bytes;
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						counter_num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store direction at the end of the auth IV */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}
	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %dB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;
	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */