1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines functions that are the interface to the microcode spec.
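 *
 * An aside on the zuc_d table below: it holds the sixteen 15-bit ZUC
 * LFSR initialization constants D0..D15 from the ZUC specification
 * (D0 = 0x44D7, D1 = 0x26BC, ...), stored as big-endian byte pairs in
 * the layout the microcode consumes.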
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
29 * Microcode only supports the following combinations:
30 * encryption followed by authentication,
31 * authentication followed by decryption.
34 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 /* Unsupported as of now by microcode */
38 CPT_LOG_DP_ERR("Unsupported combination");
41 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 /* For GMAC auth there is no cipher operation */
45 if (xform->next->auth.algo !=
46     RTE_CRYPTO_AUTH_AES_GMAC) {
48 /* Unsupported as of now by microcode */
49 CPT_LOG_DP_ERR("Unsupported combination");
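/*
 * A sketch of what gen_key_snow3g() below does: it repacks the 16-byte
 * SNOW 3G cipher key CK into four 32-bit words with the word order
 * reversed (keyx[3] gets CK[0..3], ..., keyx[0] gets CK[12..15]); the
 * shift-compose followed by rte_cpu_to_be_32() leaves each word's bytes
 * in their original order in memory.
 */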
57 static __rte_always_inline void
58 gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
62 for (i = 0; i < 4; i++) {
64 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 (ck[base + 2] << 8) | (ck[base + 3]);
66 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
74 struct cpt_ctx *cpt_ctx = ctx;
75 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
91 static __rte_always_inline int
92 cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
109 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
114 key_len = key_len / 2;
115 if (unlikely(key_len == CPT_BYTE_24)) {
116 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
119 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
125 if (unlikely(key_len != 16))
127 /* No support for AEAD yet */
128 if (unlikely(cpt_ctx->hash_type))
130 fc_type = ZUC_SNOW3G;
134 if (unlikely(key_len != 16))
136 /* No support for AEAD yet */
137 if (unlikely(cpt_ctx->hash_type))
147 static __rte_always_inline void
148 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
150 cpt_ctx->enc_cipher = 0;
151 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
154 static __rte_always_inline void
155 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
157 mc_aes_type_t aes_key_type = 0;
160 aes_key_type = AES_128_BIT;
163 aes_key_type = AES_192_BIT;
166 aes_key_type = AES_256_BIT;
169 /* This should not happen */
170 CPT_LOG_DP_ERR("Invalid AES key len");
173 CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
176 static __rte_always_inline void
177 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
182 gen_key_snow3g(key, keyx);
183 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
184 cpt_ctx->fc_type = ZUC_SNOW3G;
185 cpt_ctx->zsk_flags = 0;
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
193 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 cpt_ctx->fc_type = ZUC_SNOW3G;
196 cpt_ctx->zsk_flags = 0;
199 static __rte_always_inline void
200 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
204 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
205 cpt_ctx->zsk_flags = 0;
206 cpt_ctx->fc_type = KASUMI;
209 static __rte_always_inline void
210 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
213 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
214 cpt_ctx->zsk_flags = 0;
215 cpt_ctx->fc_type = KASUMI;
218 static __rte_always_inline int
219 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
220 uint16_t key_len, uint8_t *salt)
222 struct cpt_ctx *cpt_ctx = ctx;
223 mc_fc_context_t *fctx = &cpt_ctx->fctx;
224 uint64_t *ctrl_flags = NULL;
227 /* Validate key before proceeding */
228 fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
229 if (unlikely(fc_type == -1))
232 if (fc_type == FC_GEN) {
233 cpt_ctx->fc_type = FC_GEN;
234 ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
235 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
237 * We must always indicate that the IV comes from DPTR, as the
238 * user can override the IV per operation.
240 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
245 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
248 /* CPT performs DES using 3DES with the 8B DES key
249 * replicated twice more to form the 24B 3DES key.
250 * E.g. if the original key is "0x0a 0x0b", the new key is
251 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
254 /* Skipping the first 8B as it will be copied
255 * in the regular code flow
257 memcpy(fctx->enc.encr_key+key_len, key, key_len);
258 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
262 /* For DES3_ECB IV need to be from CTX. */
263 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
269 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
272 /* Even though the IV source is DPTR,
273 * the AES-GCM salt is taken from CTX.
276 memcpy(fctx->enc.encr_iv, salt, 4);
277 /* Assuming it was just salt update
283 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
286 key_len = key_len / 2;
287 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
289 /* Copy key2 for XTS into ipad */
290 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
291 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
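/*
 * So the user-supplied XTS key is key1 || key2: key1 goes through the
 * regular encr_key path below, while key2 is parked in the hmac.ipad
 * field (unused for ciphers), which is presumably where the microcode
 * expects it.
 */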
294 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
297 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
300 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
303 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
309 /* Only for FC_GEN case */
311 /* For GMAC auth, cipher must be NULL */
312 if (cpt_ctx->hash_type != GMAC_TYPE)
313 CPT_P_ENC_CTRL(fctx).enc_cipher = type;
315 memcpy(fctx->enc.encr_key, key, key_len);
318 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
321 cpt_ctx->enc_cipher = type;
326 static __rte_always_inline uint32_t
327 fill_sg_comp(sg_comp_t *list,
329 phys_addr_t dma_addr,
332 sg_comp_t *to = &list[i>>2];
334 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
335 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
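/*
 * A worked example of the packing above: each sg_comp_t bundles four
 * {len, ptr} slots, so component index i lands in list[i >> 2],
 * slot i % 4; e.g. i = 5 uses list[1], slot 1. Lengths and addresses
 * are stored big-endian for the microcode.
 */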
340 static __rte_always_inline uint32_t
341 fill_sg_comp_from_buf(sg_comp_t *list,
345 sg_comp_t *to = &list[i>>2];
347 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
348 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
353 static __rte_always_inline uint32_t
354 fill_sg_comp_from_buf_min(sg_comp_t *list,
359 sg_comp_t *to = &list[i >> 2];
360 uint32_t size = *psize;
363 e_len = (size > from->size) ? from->size : size;
364 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
365 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
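/*
 * Unlike fill_sg_comp_from_buf(), this variant caps the component at
 * *psize bytes and reduces *psize by the length consumed, which is why
 * callers treat a non-zero remainder in *psize as insufficient buffer
 * space (see the size checks at the call sites).
 */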
372 * This fills the SG I/O list expected by the microcode
373 * from the IOV given by the user.
375 static __rte_always_inline uint32_t
376 fill_sg_comp_from_iov(sg_comp_t *list,
378 iov_ptr_t *from, uint32_t from_offset,
379 uint32_t *psize, buf_ptr_t *extra_buf,
380 uint32_t extra_offset)
383 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
384 uint32_t size = *psize - extra_len;
388 for (j = 0; (j < from->buf_cnt) && size; j++) {
389 phys_addr_t e_dma_addr;
391 sg_comp_t *to = &list[i >> 2];
396 if (unlikely(from_offset)) {
397 if (from_offset >= bufs[j].size) {
398 from_offset -= bufs[j].size;
401 e_dma_addr = bufs[j].dma_addr + from_offset;
402 e_len = (size > (bufs[j].size - from_offset)) ?
403 (bufs[j].size - from_offset) : size;
406 e_dma_addr = bufs[j].dma_addr;
407 e_len = (size > bufs[j].size) ?
411 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
412 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
414 if (extra_len && (e_len >= extra_offset)) {
415 /* Break the data at given offset */
416 uint32_t next_len = e_len - extra_offset;
417 phys_addr_t next_dma = e_dma_addr + extra_offset;
422 e_len = extra_offset;
424 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
427 /* Insert extra data ptr */
432 rte_cpu_to_be_16(extra_buf->size);
434 rte_cpu_to_be_64(extra_buf->dma_addr);
436 /* size already decremented by extra len */
439 /* insert the rest of the data */
443 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
444 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
453 extra_offset -= size;
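/*
 * In rough terms: the loop above walks the user IOV, first skipping
 * from_offset bytes, and emits one component per buffer until *psize
 * bytes are covered; when extra_buf is given (e.g. AAD), the component
 * crossing extra_offset is split and a component for extra_buf is
 * spliced in between the two halves.
 */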
461 static __rte_always_inline void
462 cpt_digest_gen_prep(uint32_t flags,
464 digest_params_t *params,
468 struct cpt_request_info *req;
471 uint16_t data_len, mac_len, key_len;
472 auth_type_t hash_type;
475 sg_comp_t *gather_comp;
476 sg_comp_t *scatter_comp;
478 uint32_t g_size_bytes, s_size_bytes;
479 uint64_t dptr_dma, rptr_dma;
480 vq_cmd_word0_t vq_cmd_w0;
481 vq_cmd_word3_t vq_cmd_w3;
482 void *c_vaddr, *m_vaddr;
483 uint64_t c_dma, m_dma;
484 opcode_info_t opcode;
486 ctx = params->ctx_buf.vaddr;
487 meta_p = &params->meta_buf;
489 m_vaddr = meta_p->vaddr;
490 m_dma = meta_p->dma_addr;
491 m_size = meta_p->size;
494 * Reserve the space that follows the app data for the completion code
495 * and alternate completion code, so they share the app data's cache line
497 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
498 m_dma += COMPLETION_CODE_SIZE;
499 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
501 c_vaddr = (uint8_t *)m_vaddr + size;
502 c_dma = m_dma + size;
503 size += sizeof(cpt_res_s_t);
505 m_vaddr = (uint8_t *)m_vaddr + size;
511 size = sizeof(struct cpt_request_info);
512 m_vaddr = (uint8_t *)m_vaddr + size;
516 hash_type = ctx->hash_type;
517 mac_len = ctx->mac_len;
518 key_len = ctx->auth_key_len;
519 data_len = AUTH_DLEN(d_lens);
523 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
525 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
526 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
528 rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
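/*
 * dlen covers the 8B-aligned HMAC key plus the data: the key is DMA'd
 * ahead of the data, prepended as the first gather component further
 * below.
 */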
530 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
531 vq_cmd_w0.s.param1 = 0;
532 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
537 /* Only the NULL-cipher, NULL-auth case enters this branch */
538 if (unlikely(!hash_type && !ctx->enc_cipher)) {
539 opcode.s.major = CPT_MAJOR_OP_MISC;
540 /* Minor op is passthrough */
541 opcode.s.minor = 0x03;
542 /* Send out completion code only */
543 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
546 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
548 /* DPTR has SG list */
552 ((uint16_t *)in_buffer)[0] = 0;
553 ((uint16_t *)in_buffer)[1] = 0;
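/*
 * SG list header layout, as used throughout this file: the first
 * 8 bytes of in_buffer hold four u16 fields; [0] and [1] are zeroed,
 * while [2] takes the gather-component count and [3] the
 * scatter-component count, both big-endian (filled in below).
 */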
555 /* TODO Add error check if space will be sufficient */
556 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
565 uint64_t k_dma = params->ctx_buf.dma_addr +
566 offsetof(struct cpt_ctx, auth_key);
568 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
574 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
576 if (unlikely(size)) {
577 CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
583 * Looks like we need to support zero data
584 * gather ptr in case of hash & hmac
588 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
589 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
596 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
598 if (flags & VALID_MAC_BUF) {
599 if (unlikely(params->mac_buf.size < mac_len)) {
600 CPT_LOG_DP_ERR("Insufficient MAC size");
605 i = fill_sg_comp_from_buf_min(scatter_comp, i,
606 &params->mac_buf, &size);
609 i = fill_sg_comp_from_iov(scatter_comp, i,
610 params->src_iov, data_len,
612 if (unlikely(size)) {
613 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
619 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
620 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
622 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
624 /* This is the DPTR len in case of SG mode */
625 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
627 m_vaddr = (uint8_t *)m_vaddr + size;
631 /* cpt alternate completion address saved earlier */
632 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
633 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
634 rptr_dma = c_dma - 8;
636 req->ist.ei1 = dptr_dma;
637 req->ist.ei2 = rptr_dma;
638 /* First 16-bit swap then 64-bit swap */
639 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
640 * to eliminate all the swapping
642 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
647 /* 16 byte aligned cpt res address */
648 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
649 *req->completion_addr = COMPLETION_CODE_INIT;
650 req->comp_baddr = c_dma;
652 /* Fill microcode part of instruction */
653 req->ist.ei0 = vq_cmd_w0.u64;
654 req->ist.ei3 = vq_cmd_w3.u64;
662 static __rte_always_inline void
663 cpt_enc_hmac_prep(uint32_t flags,
666 fc_params_t *fc_params,
670 uint32_t iv_offset = 0;
671 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
672 struct cpt_ctx *cpt_ctx;
673 uint32_t cipher_type, hash_type;
674 uint32_t mac_len, size;
676 struct cpt_request_info *req;
677 buf_ptr_t *meta_p, *aad_buf = NULL;
678 uint32_t encr_offset, auth_offset;
679 uint32_t encr_data_len, auth_data_len, aad_len = 0;
680 uint32_t passthrough_len = 0;
681 void *m_vaddr, *offset_vaddr;
682 uint64_t m_dma, offset_dma, ctx_dma;
683 vq_cmd_word0_t vq_cmd_w0;
684 vq_cmd_word3_t vq_cmd_w3;
688 opcode_info_t opcode;
690 meta_p = &fc_params->meta_buf;
691 m_vaddr = meta_p->vaddr;
692 m_dma = meta_p->dma_addr;
693 m_size = meta_p->size;
695 encr_offset = ENCR_OFFSET(d_offs);
696 auth_offset = AUTH_OFFSET(d_offs);
697 encr_data_len = ENCR_DLEN(d_lens);
698 auth_data_len = AUTH_DLEN(d_lens);
699 if (unlikely(flags & VALID_AAD_BUF)) {
701 * We don't support both AAD
702 * and auth data given separately
706 aad_len = fc_params->aad_buf.size;
707 aad_buf = &fc_params->aad_buf;
709 cpt_ctx = fc_params->ctx_buf.vaddr;
710 cipher_type = cpt_ctx->enc_cipher;
711 hash_type = cpt_ctx->hash_type;
712 mac_len = cpt_ctx->mac_len;
715 * Reserve the space that follows the app data for the completion code
716 * and alternate completion code, so they share the app data's cache line
718 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
719 m_dma += COMPLETION_CODE_SIZE;
720 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
723 c_vaddr = (uint8_t *)m_vaddr + size;
724 c_dma = m_dma + size;
725 size += sizeof(cpt_res_s_t);
727 m_vaddr = (uint8_t *)m_vaddr + size;
731 /* start cpt request info struct at 8 byte boundary */
732 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
735 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
737 size += sizeof(struct cpt_request_info);
738 m_vaddr = (uint8_t *)m_vaddr + size;
742 if (hash_type == GMAC_TYPE)
745 if (unlikely(!(flags & VALID_IV_BUF))) {
747 iv_offset = ENCR_IV_OFFSET(d_offs);
750 if (unlikely(flags & VALID_AAD_BUF)) {
752 * When AAD is given, data before encr_offset is passed through.
753 * Since AAD is given as a separate pointer and not as an offset,
754 * this is a special case: we need to fragment the input data
755 * into passthrough + encr_data and then insert the AAD in between.
757 if (hash_type != GMAC_TYPE) {
758 passthrough_len = encr_offset;
759 auth_offset = passthrough_len + iv_len;
760 encr_offset = passthrough_len + aad_len + iv_len;
761 auth_data_len = aad_len + encr_data_len;
763 passthrough_len = 16 + aad_len;
764 auth_offset = passthrough_len + iv_len;
765 auth_data_len = aad_len;
768 encr_offset += iv_len;
769 auth_offset += iv_len;
773 opcode.s.major = CPT_MAJOR_OP_FC;
776 auth_dlen = auth_offset + auth_data_len;
777 enc_dlen = encr_data_len + encr_offset;
778 if (unlikely(encr_data_len & 0xf)) {
779 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
780 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
781 else if (likely((cipher_type == AES_CBC) ||
782 (cipher_type == AES_ECB)))
783 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
786 if (unlikely(hash_type == GMAC_TYPE)) {
787 encr_offset = auth_dlen;
791 if (unlikely(auth_dlen > enc_dlen)) {
792 inputlen = auth_dlen;
793 outputlen = auth_dlen + mac_len;
796 outputlen = enc_dlen + mac_len;
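/* Whichever of the auth and cipher regions extends further
 * determines the DMA length; the MAC is appended after it on output.
 */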
801 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
802 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
804 * On 83XX the IV & offset control word cannot be part of the
805 * instruction and must instead live in the data buffer, so we
806 * check whether head room is available and only then do the
807 * direct mode processing
809 if (likely((flags & SINGLE_BUF_INPLACE) &&
810 (flags & SINGLE_BUF_HEADTAILROOM))) {
811 void *dm_vaddr = fc_params->bufs[0].vaddr;
812 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
814 * This flag indicates that 24 bytes of head room and
815 * 8 bytes of tail room are available, so we can use
816 * the (limited) DIRECT MODE
819 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
820 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
823 req->ist.ei1 = offset_dma;
824 /* RPTR should just exclude offset control word */
825 req->ist.ei2 = dm_dma_addr - iv_len;
826 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
827 + outputlen - iv_len);
829 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
831 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
833 if (likely(iv_len)) {
834 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
836 uint64_t *src = fc_params->iv_buf;
841 *(uint64_t *)offset_vaddr =
842 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
843 ((uint64_t)iv_offset << 8) |
844 ((uint64_t)auth_offset));
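/*
 * A sketch of the control word just written: auth_offset sits in the
 * low bits, iv_offset at bit 8 and encr_offset at bit 16; the 64-bit
 * value is stored big-endian so the microcode reads the offsets
 * MSB-first.
 */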
847 uint32_t i, g_size_bytes, s_size_bytes;
848 uint64_t dptr_dma, rptr_dma;
849 sg_comp_t *gather_comp;
850 sg_comp_t *scatter_comp;
853 /* This falls under strict SG mode */
854 offset_vaddr = m_vaddr;
856 size = OFF_CTRL_LEN + iv_len;
858 m_vaddr = (uint8_t *)m_vaddr + size;
862 opcode.s.major |= CPT_DMA_MODE;
864 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
866 if (likely(iv_len)) {
867 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
869 uint64_t *src = fc_params->iv_buf;
874 *(uint64_t *)offset_vaddr =
875 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
876 ((uint64_t)iv_offset << 8) |
877 ((uint64_t)auth_offset));
879 /* DPTR has SG list */
883 ((uint16_t *)in_buffer)[0] = 0;
884 ((uint16_t *)in_buffer)[1] = 0;
886 /* TODO Add error check if space will be sufficient */
887 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
895 /* Offset control word that includes iv */
896 i = fill_sg_comp(gather_comp, i, offset_dma,
897 OFF_CTRL_LEN + iv_len);
900 size = inputlen - iv_len;
902 uint32_t aad_offset = aad_len ? passthrough_len : 0;
904 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
905 i = fill_sg_comp_from_buf_min(gather_comp, i,
909 i = fill_sg_comp_from_iov(gather_comp, i,
912 aad_buf, aad_offset);
915 if (unlikely(size)) {
916 CPT_LOG_DP_ERR("Insufficient buffer space,"
917 " size %d needed", size);
921 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
922 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
925 * Output Scatter list
929 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
932 if (likely(iv_len)) {
933 i = fill_sg_comp(scatter_comp, i,
934 offset_dma + OFF_CTRL_LEN,
938 /* output data or output data + digest*/
939 if (unlikely(flags & VALID_MAC_BUF)) {
940 size = outputlen - iv_len - mac_len;
942 uint32_t aad_offset =
943 aad_len ? passthrough_len : 0;
945 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
946 i = fill_sg_comp_from_buf_min(
952 i = fill_sg_comp_from_iov(scatter_comp,
960 if (unlikely(size)) {
961 CPT_LOG_DP_ERR("Insufficient buffer"
962 " space, size %d needed",
969 i = fill_sg_comp_from_buf(scatter_comp, i,
970 &fc_params->mac_buf);
973 /* Output including mac */
974 size = outputlen - iv_len;
976 uint32_t aad_offset =
977 aad_len ? passthrough_len : 0;
979 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
980 i = fill_sg_comp_from_buf_min(
986 i = fill_sg_comp_from_iov(scatter_comp,
994 if (unlikely(size)) {
995 CPT_LOG_DP_ERR("Insufficient buffer"
996 " space, size %d needed",
1002 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1003 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1005 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1007 /* This is the DPTR len in case of SG mode */
1008 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1010 m_vaddr = (uint8_t *)m_vaddr + size;
1014 /* cpt alternate completion address saved earlier */
1015 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1016 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1017 rptr_dma = c_dma - 8;
1019 req->ist.ei1 = dptr_dma;
1020 req->ist.ei2 = rptr_dma;
1023 /* First 16-bit swap then 64-bit swap */
1024 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1025 * to eliminate all the swapping
1027 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1029 ctx_dma = fc_params->ctx_buf.dma_addr +
1030 offsetof(struct cpt_ctx, fctx);
1033 vq_cmd_w3.s.grp = 0;
1034 vq_cmd_w3.s.cptr = ctx_dma;
1036 /* 16 byte aligned cpt res address */
1037 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1038 *req->completion_addr = COMPLETION_CODE_INIT;
1039 req->comp_baddr = c_dma;
1041 /* Fill microcode part of instruction */
1042 req->ist.ei0 = vq_cmd_w0.u64;
1043 req->ist.ei3 = vq_cmd_w3.u64;
1051 static __rte_always_inline void
1052 cpt_dec_hmac_prep(uint32_t flags,
1055 fc_params_t *fc_params,
1059 uint32_t iv_offset = 0, size;
1060 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1061 struct cpt_ctx *cpt_ctx;
1062 int32_t hash_type, mac_len, m_size;
1063 uint8_t iv_len = 16;
1064 struct cpt_request_info *req;
1065 buf_ptr_t *meta_p, *aad_buf = NULL;
1066 uint32_t encr_offset, auth_offset;
1067 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1068 uint32_t passthrough_len = 0;
1069 void *m_vaddr, *offset_vaddr;
1070 uint64_t m_dma, offset_dma, ctx_dma;
1071 opcode_info_t opcode;
1072 vq_cmd_word0_t vq_cmd_w0;
1073 vq_cmd_word3_t vq_cmd_w3;
1077 meta_p = &fc_params->meta_buf;
1078 m_vaddr = meta_p->vaddr;
1079 m_dma = meta_p->dma_addr;
1080 m_size = meta_p->size;
1082 encr_offset = ENCR_OFFSET(d_offs);
1083 auth_offset = AUTH_OFFSET(d_offs);
1084 encr_data_len = ENCR_DLEN(d_lens);
1085 auth_data_len = AUTH_DLEN(d_lens);
1087 if (unlikely(flags & VALID_AAD_BUF)) {
1089 * We don't support both AAD
1090 * and auth data given separately
1094 aad_len = fc_params->aad_buf.size;
1095 aad_buf = &fc_params->aad_buf;
1098 cpt_ctx = fc_params->ctx_buf.vaddr;
1099 hash_type = cpt_ctx->hash_type;
1100 mac_len = cpt_ctx->mac_len;
1102 if (hash_type == GMAC_TYPE)
1105 if (unlikely(!(flags & VALID_IV_BUF))) {
1107 iv_offset = ENCR_IV_OFFSET(d_offs);
1110 if (unlikely(flags & VALID_AAD_BUF)) {
1112 * When AAD is given, data before encr_offset is passed through.
1113 * Since AAD is given as a separate pointer and not as an offset,
1114 * this is a special case: we need to fragment the input data
1115 * into passthrough + encr_data and then insert the AAD in between.
1117 if (hash_type != GMAC_TYPE) {
1118 passthrough_len = encr_offset;
1119 auth_offset = passthrough_len + iv_len;
1120 encr_offset = passthrough_len + aad_len + iv_len;
1121 auth_data_len = aad_len + encr_data_len;
1123 passthrough_len = 16 + aad_len;
1124 auth_offset = passthrough_len + iv_len;
1125 auth_data_len = aad_len;
1128 encr_offset += iv_len;
1129 auth_offset += iv_len;
1133 * Reserve the space that follows the app data for the completion code
1134 * and alternate completion code, so they share the app data's cache line
1136 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1137 m_dma += COMPLETION_CODE_SIZE;
1138 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1140 c_vaddr = (uint8_t *)m_vaddr + size;
1141 c_dma = m_dma + size;
1142 size += sizeof(cpt_res_s_t);
1144 m_vaddr = (uint8_t *)m_vaddr + size;
1148 /* start cpt request info structure at 8 byte alignment */
1149 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1152 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1154 size += sizeof(struct cpt_request_info);
1155 m_vaddr = (uint8_t *)m_vaddr + size;
1160 opcode.s.major = CPT_MAJOR_OP_FC;
1163 enc_dlen = encr_offset + encr_data_len;
1164 auth_dlen = auth_offset + auth_data_len;
1166 if (auth_dlen > enc_dlen) {
1167 inputlen = auth_dlen + mac_len;
1168 outputlen = auth_dlen;
1170 inputlen = enc_dlen + mac_len;
1171 outputlen = enc_dlen;
1174 if (hash_type == GMAC_TYPE)
1175 encr_offset = inputlen;
1178 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1179 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
1182 * On 83XX the IV & offset control word cannot be part of the
1183 * instruction and must instead live in the data buffer, so we
1184 * check whether head room is available and only then do the
1185 * direct mode processing
1187 if (likely((flags & SINGLE_BUF_INPLACE) &&
1188 (flags & SINGLE_BUF_HEADTAILROOM))) {
1189 void *dm_vaddr = fc_params->bufs[0].vaddr;
1190 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1192 * This flag indicates that 24 bytes of head room and
1193 * 8 bytes of tail room are available, so we can use
1194 * the (limited) DIRECT MODE
1197 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1198 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1199 req->ist.ei1 = offset_dma;
1201 /* RPTR should just exclude offset control word */
1202 req->ist.ei2 = dm_dma_addr - iv_len;
1204 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1205 outputlen - iv_len);
1206 /* Since this is decryption,
1207 * don't touch the contents of the
1208 * alternate completion-code space, as it contains
1212 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1214 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1216 if (likely(iv_len)) {
1217 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1219 uint64_t *src = fc_params->iv_buf;
1224 *(uint64_t *)offset_vaddr =
1225 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1226 ((uint64_t)iv_offset << 8) |
1227 ((uint64_t)auth_offset));
1230 uint64_t dptr_dma, rptr_dma;
1231 uint32_t g_size_bytes, s_size_bytes;
1232 sg_comp_t *gather_comp;
1233 sg_comp_t *scatter_comp;
1237 /* This falls under strict SG mode */
1238 offset_vaddr = m_vaddr;
1240 size = OFF_CTRL_LEN + iv_len;
1242 m_vaddr = (uint8_t *)m_vaddr + size;
1246 opcode.s.major |= CPT_DMA_MODE;
1248 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1250 if (likely(iv_len)) {
1251 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1253 uint64_t *src = fc_params->iv_buf;
1258 *(uint64_t *)offset_vaddr =
1259 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1260 ((uint64_t)iv_offset << 8) |
1261 ((uint64_t)auth_offset));
1263 /* DPTR has SG list */
1264 in_buffer = m_vaddr;
1267 ((uint16_t *)in_buffer)[0] = 0;
1268 ((uint16_t *)in_buffer)[1] = 0;
1270 /* TODO Add error check if space will be sufficient */
1271 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1278 /* Offset control word that includes iv */
1279 i = fill_sg_comp(gather_comp, i, offset_dma,
1280 OFF_CTRL_LEN + iv_len);
1282 /* Add input data */
1283 if (flags & VALID_MAC_BUF) {
1284 size = inputlen - iv_len - mac_len;
1286 /* input data only */
1287 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1288 i = fill_sg_comp_from_buf_min(
1293 uint32_t aad_offset = aad_len ?
1294 passthrough_len : 0;
1296 i = fill_sg_comp_from_iov(gather_comp,
1303 if (unlikely(size)) {
1304 CPT_LOG_DP_ERR("Insufficient buffer"
1305 " space, size %d needed",
1313 i = fill_sg_comp_from_buf(gather_comp, i,
1314 &fc_params->mac_buf);
1317 /* input data + mac */
1318 size = inputlen - iv_len;
1320 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1321 i = fill_sg_comp_from_buf_min(
1326 uint32_t aad_offset = aad_len ?
1327 passthrough_len : 0;
1329 if (unlikely(!fc_params->src_iov)) {
1330 CPT_LOG_DP_ERR("Bad input args");
1334 i = fill_sg_comp_from_iov(
1342 if (unlikely(size)) {
1343 CPT_LOG_DP_ERR("Insufficient buffer"
1344 " space, size %d needed",
1350 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1351 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1354 * Output Scatter List
1359 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1363 i = fill_sg_comp(scatter_comp, i,
1364 offset_dma + OFF_CTRL_LEN,
1368 /* Add output data */
1369 size = outputlen - iv_len;
1371 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1372 /* handle single buffer here */
1373 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1377 uint32_t aad_offset = aad_len ?
1378 passthrough_len : 0;
1380 if (unlikely(!fc_params->dst_iov)) {
1381 CPT_LOG_DP_ERR("Bad input args");
1385 i = fill_sg_comp_from_iov(scatter_comp, i,
1386 fc_params->dst_iov, 0,
1391 if (unlikely(size)) {
1392 CPT_LOG_DP_ERR("Insufficient buffer space,"
1393 " size %d needed", size);
1398 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1399 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1401 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1403 /* This is the DPTR len in case of SG mode */
1404 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1406 m_vaddr = (uint8_t *)m_vaddr + size;
1410 /* cpt alternate completion address saved earlier */
1411 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1412 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1413 rptr_dma = c_dma - 8;
1414 size += COMPLETION_CODE_SIZE;
1416 req->ist.ei1 = dptr_dma;
1417 req->ist.ei2 = rptr_dma;
1420 /* First 16-bit swap then 64-bit swap */
1421 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1422 * to eliminate all the swapping
1424 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1426 ctx_dma = fc_params->ctx_buf.dma_addr +
1427 offsetof(struct cpt_ctx, fctx);
1430 vq_cmd_w3.s.grp = 0;
1431 vq_cmd_w3.s.cptr = ctx_dma;
1433 /* 16 byte aligned cpt res address */
1434 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1435 *req->completion_addr = COMPLETION_CODE_INIT;
1436 req->comp_baddr = c_dma;
1438 /* Fill microcode part of instruction */
1439 req->ist.ei0 = vq_cmd_w0.u64;
1440 req->ist.ei3 = vq_cmd_w3.u64;
1448 static __rte_always_inline void
1449 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1452 fc_params_t *params,
1457 int32_t inputlen, outputlen;
1458 struct cpt_ctx *cpt_ctx;
1459 uint32_t mac_len = 0;
1461 struct cpt_request_info *req;
1463 uint32_t encr_offset = 0, auth_offset = 0;
1464 uint32_t encr_data_len = 0, auth_data_len = 0;
1465 int flags, iv_len = 16, m_size;
1466 void *m_vaddr, *c_vaddr;
1467 uint64_t m_dma, c_dma, offset_ctrl;
1468 uint64_t *offset_vaddr, offset_dma;
1469 uint32_t *iv_s, iv[4];
1470 vq_cmd_word0_t vq_cmd_w0;
1471 vq_cmd_word3_t vq_cmd_w3;
1472 opcode_info_t opcode;
1474 buf_p = &params->meta_buf;
1475 m_vaddr = buf_p->vaddr;
1476 m_dma = buf_p->dma_addr;
1477 m_size = buf_p->size;
1479 cpt_ctx = params->ctx_buf.vaddr;
1480 flags = cpt_ctx->zsk_flags;
1481 mac_len = cpt_ctx->mac_len;
1482 snow3g = cpt_ctx->snow3g;
1485 * Reserve the space that follows the app data for the completion code
1486 * and alternate completion code, so they share the app data's cache line
1488 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1489 m_dma += COMPLETION_CODE_SIZE;
1490 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1493 c_vaddr = (uint8_t *)m_vaddr + size;
1494 c_dma = m_dma + size;
1495 size += sizeof(cpt_res_s_t);
1497 m_vaddr = (uint8_t *)m_vaddr + size;
1501 /* Reserve memory for cpt request info */
1504 size = sizeof(struct cpt_request_info);
1505 m_vaddr = (uint8_t *)m_vaddr + size;
1509 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1511 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1512 opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1513 (0 << 3) | (flags & 0x7));
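/*
 * One reading of the packing above, per the comment: bit 6 set means
 * the context comes from CPTR, bit 5 distinguishes SNOW 3G from ZUC,
 * bits 4 and 3 select the key and IV source (DPTR mode here), and the
 * low three bits carry zsk_flags (the operation type).
 */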
1517 * Microcode expects offsets in bytes
1518 * TODO: Rounding off
1520 auth_data_len = AUTH_DLEN(d_lens);
1523 auth_offset = AUTH_OFFSET(d_offs);
1524 auth_offset = auth_offset / 8;
1526 /* consider iv len */
1527 auth_offset += iv_len;
1529 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1530 outputlen = mac_len;
1532 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1537 * Microcode expects offsets in bytes
1538 * TODO: Rounding off
1540 encr_data_len = ENCR_DLEN(d_lens);
1542 encr_offset = ENCR_OFFSET(d_offs);
1543 encr_offset = encr_offset / 8;
1544 /* consider iv len */
1545 encr_offset += iv_len;
1547 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1548 outputlen = inputlen;
1550 /* iv offset is 0 */
1551 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1555 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1560 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big-endian
1561 * form; the MC needs it as IV0 IV1 IV2 IV3
1564 for (j = 0; j < 4; j++)
1565 iv[j] = iv_s[3 - j];
1567 /* ZUC doesn't need a swap */
1568 for (j = 0; j < 4; j++)
1573 * GP op header, lengths are expected in bits.
1576 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1577 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
1580 * On 83XX the IV & offset control word cannot be part of the
1581 * instruction and must instead live in the data buffer, so we
1582 * check whether head room is available and only then do the
1583 * direct mode processing
1585 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1586 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1587 void *dm_vaddr = params->bufs[0].vaddr;
1588 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1590 * This flag indicates that 24 bytes of head room and
1591 * 8 bytes of tail room are available, so we can use
1592 * the (limited) DIRECT MODE
1595 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1596 OFF_CTRL_LEN - iv_len);
1597 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1600 req->ist.ei1 = offset_dma;
1601 /* RPTR should just exclude offset control word */
1602 req->ist.ei2 = dm_dma_addr - iv_len;
1603 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1604 + outputlen - iv_len);
1606 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1608 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1610 if (likely(iv_len)) {
1611 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1613 memcpy(iv_d, iv, 16);
1616 *offset_vaddr = offset_ctrl;
1618 uint32_t i, g_size_bytes, s_size_bytes;
1619 uint64_t dptr_dma, rptr_dma;
1620 sg_comp_t *gather_comp;
1621 sg_comp_t *scatter_comp;
1625 /* save space for offset control word and iv */
1626 offset_vaddr = m_vaddr;
1629 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1630 m_dma += OFF_CTRL_LEN + iv_len;
1631 m_size -= OFF_CTRL_LEN + iv_len;
1633 opcode.s.major |= CPT_DMA_MODE;
1635 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1637 /* DPTR has SG list */
1638 in_buffer = m_vaddr;
1641 ((uint16_t *)in_buffer)[0] = 0;
1642 ((uint16_t *)in_buffer)[1] = 0;
1644 /* TODO Add error check if space will be sufficient */
1645 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1652 /* Offset control word followed by iv */
1654 i = fill_sg_comp(gather_comp, i, offset_dma,
1655 OFF_CTRL_LEN + iv_len);
1657 /* iv offset is 0 */
1658 *offset_vaddr = offset_ctrl;
1660 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1661 memcpy(iv_d, iv, 16);
1664 size = inputlen - iv_len;
1666 i = fill_sg_comp_from_iov(gather_comp, i,
1669 if (unlikely(size)) {
1670 CPT_LOG_DP_ERR("Insufficient buffer space,"
1671 " size %d needed", size);
1675 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1676 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1679 * Output Scatter List
1684 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1687 /* IV in SLIST only for EEA3 & UEA2 */
1692 i = fill_sg_comp(scatter_comp, i,
1693 offset_dma + OFF_CTRL_LEN, iv_len);
1696 /* Add output data */
1697 if (req_flags & VALID_MAC_BUF) {
1698 size = outputlen - iv_len - mac_len;
1700 i = fill_sg_comp_from_iov(scatter_comp, i,
1704 if (unlikely(size)) {
1705 CPT_LOG_DP_ERR("Insufficient buffer space,"
1706 " size %d needed", size);
1713 i = fill_sg_comp_from_buf(scatter_comp, i,
1717 /* Output including mac */
1718 size = outputlen - iv_len;
1720 i = fill_sg_comp_from_iov(scatter_comp, i,
1724 if (unlikely(size)) {
1725 CPT_LOG_DP_ERR("Insufficient buffer space,"
1726 " size %d needed", size);
1731 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1732 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1734 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1736 /* This is the DPTR len in case of SG mode */
1737 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1739 m_vaddr = (uint8_t *)m_vaddr + size;
1743 /* cpt alternate completion address saved earlier */
1744 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1745 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1746 rptr_dma = c_dma - 8;
1748 req->ist.ei1 = dptr_dma;
1749 req->ist.ei2 = rptr_dma;
1752 /* First 16-bit swap then 64-bit swap */
1753 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1754 * to eliminate all the swapping
1756 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1760 vq_cmd_w3.s.grp = 0;
1761 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1762 offsetof(struct cpt_ctx, zs_ctx);
1764 /* 16 byte aligned cpt res address */
1765 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1766 *req->completion_addr = COMPLETION_CODE_INIT;
1767 req->comp_baddr = c_dma;
1769 /* Fill microcode part of instruction */
1770 req->ist.ei0 = vq_cmd_w0.u64;
1771 req->ist.ei3 = vq_cmd_w3.u64;
1779 static __rte_always_inline void
1780 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1783 fc_params_t *params,
1788 int32_t inputlen = 0, outputlen;
1789 struct cpt_ctx *cpt_ctx;
1790 uint8_t snow3g, iv_len = 16;
1791 struct cpt_request_info *req;
1793 uint32_t encr_offset;
1794 uint32_t encr_data_len;
1796 void *m_vaddr, *c_vaddr;
1797 uint64_t m_dma, c_dma;
1798 uint64_t *offset_vaddr, offset_dma;
1799 uint32_t *iv_s, iv[4], j;
1800 vq_cmd_word0_t vq_cmd_w0;
1801 vq_cmd_word3_t vq_cmd_w3;
1802 opcode_info_t opcode;
1804 buf_p = &params->meta_buf;
1805 m_vaddr = buf_p->vaddr;
1806 m_dma = buf_p->dma_addr;
1807 m_size = buf_p->size;
1810 * Microcode expects offsets in bytes
1811 * TODO: Rounding off
1813 encr_offset = ENCR_OFFSET(d_offs) / 8;
1814 encr_data_len = ENCR_DLEN(d_lens);
1816 cpt_ctx = params->ctx_buf.vaddr;
1817 flags = cpt_ctx->zsk_flags;
1818 snow3g = cpt_ctx->snow3g;
1820 * Reserve the space that follows the app data for the completion code
1821 * and alternate completion code, so they share the app data's cache line
1823 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1824 m_dma += COMPLETION_CODE_SIZE;
1825 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1828 c_vaddr = (uint8_t *)m_vaddr + size;
1829 c_dma = m_dma + size;
1830 size += sizeof(cpt_res_s_t);
1832 m_vaddr = (uint8_t *)m_vaddr + size;
1836 /* Reserve memory for cpt request info */
1839 size = sizeof(struct cpt_request_info);
1840 m_vaddr = (uint8_t *)m_vaddr + size;
1844 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1846 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1847 opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1848 (0 << 3) | (flags & 0x7));
1850 /* consider iv len */
1851 encr_offset += iv_len;
1853 inputlen = encr_offset +
1854 (RTE_ALIGN(encr_data_len, 8) / 8);
1855 outputlen = inputlen;
1858 iv_s = params->iv_buf;
1861 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big-endian
1862 * form; the MC needs it as IV0 IV1 IV2 IV3
1865 for (j = 0; j < 4; j++)
1866 iv[j] = iv_s[3 - j];
1868 /* ZUC doesn't need a swap */
1869 for (j = 0; j < 4; j++)
1874 * GP op header, lengths are expected in bits.
1877 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1880 * On 83XX the IV & offset control word cannot be part of the
1881 * instruction and must instead live in the data buffer, so we
1882 * check whether head room is available and only then do the
1883 * direct mode processing
1885 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1886 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1887 void *dm_vaddr = params->bufs[0].vaddr;
1888 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1890 * This flag indicates that 24 bytes of head room and
1891 * 8 bytes of tail room are available, so we can use
1892 * the (limited) DIRECT MODE
1895 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1896 OFF_CTRL_LEN - iv_len);
1897 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1900 req->ist.ei1 = offset_dma;
1901 /* RPTR should just exclude offset control word */
1902 req->ist.ei2 = dm_dma_addr - iv_len;
1903 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1904 + outputlen - iv_len);
1906 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1908 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1910 if (likely(iv_len)) {
1911 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1913 memcpy(iv_d, iv, 16);
1916 /* iv offset is 0 */
1917 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1919 uint32_t i, g_size_bytes, s_size_bytes;
1920 uint64_t dptr_dma, rptr_dma;
1921 sg_comp_t *gather_comp;
1922 sg_comp_t *scatter_comp;
1926 /* save space for offset and iv... */
1927 offset_vaddr = m_vaddr;
1930 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1931 m_dma += OFF_CTRL_LEN + iv_len;
1932 m_size -= OFF_CTRL_LEN + iv_len;
1934 opcode.s.major |= CPT_DMA_MODE;
1936 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1938 /* DPTR has SG list */
1939 in_buffer = m_vaddr;
1942 ((uint16_t *)in_buffer)[0] = 0;
1943 ((uint16_t *)in_buffer)[1] = 0;
1945 /* TODO Add error check if space will be sufficient */
1946 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1953 /* Offset control word */
1955 /* iv offset is 0 */
1956 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1958 i = fill_sg_comp(gather_comp, i, offset_dma,
1959 OFF_CTRL_LEN + iv_len);
1961 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1962 memcpy(iv_d, iv, 16);
1964 /* Add input data */
1965 size = inputlen - iv_len;
1967 i = fill_sg_comp_from_iov(gather_comp, i,
1970 if (unlikely(size)) {
1971 CPT_LOG_DP_ERR("Insufficient buffer space,"
1972 " size %d needed", size);
1976 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1977 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1980 * Output Scatter List
1985 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1988 i = fill_sg_comp(scatter_comp, i,
1989 offset_dma + OFF_CTRL_LEN,
1992 /* Add output data */
1993 size = outputlen - iv_len;
1995 i = fill_sg_comp_from_iov(scatter_comp, i,
1999 if (unlikely(size)) {
2000 CPT_LOG_DP_ERR("Insufficient buffer space,"
2001 " size %d needed", size);
2005 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2006 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2008 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2010 /* This is the DPTR len in case of SG mode */
2011 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2013 m_vaddr = (uint8_t *)m_vaddr + size;
2017 /* cpt alternate completion address saved earlier */
2018 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2019 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2020 rptr_dma = c_dma - 8;
2022 req->ist.ei1 = dptr_dma;
2023 req->ist.ei2 = rptr_dma;
2026 /* First 16-bit swap then 64-bit swap */
2027 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2028 * to eliminate all the swapping
2030 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2034 vq_cmd_w3.s.grp = 0;
2035 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2036 offsetof(struct cpt_ctx, zs_ctx);
2038 /* 16 byte aligned cpt res address */
2039 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2040 *req->completion_addr = COMPLETION_CODE_INIT;
2041 req->comp_baddr = c_dma;
2043 /* Fill microcode part of instruction */
2044 req->ist.ei0 = vq_cmd_w0.u64;
2045 req->ist.ei3 = vq_cmd_w3.u64;
2053 static __rte_always_inline void
2054 cpt_kasumi_enc_prep(uint32_t req_flags,
2057 fc_params_t *params,
2062 int32_t inputlen = 0, outputlen = 0;
2063 struct cpt_ctx *cpt_ctx;
2064 uint32_t mac_len = 0;
2066 struct cpt_request_info *req;
2068 uint32_t encr_offset, auth_offset;
2069 uint32_t encr_data_len, auth_data_len;
2071 uint8_t *iv_s, *iv_d, iv_len = 8;
2073 void *m_vaddr, *c_vaddr;
2074 uint64_t m_dma, c_dma;
2075 uint64_t *offset_vaddr, offset_dma;
2076 vq_cmd_word0_t vq_cmd_w0;
2077 vq_cmd_word3_t vq_cmd_w3;
2078 opcode_info_t opcode;
2080 uint32_t g_size_bytes, s_size_bytes;
2081 uint64_t dptr_dma, rptr_dma;
2082 sg_comp_t *gather_comp;
2083 sg_comp_t *scatter_comp;
2085 buf_p = &params->meta_buf;
2086 m_vaddr = buf_p->vaddr;
2087 m_dma = buf_p->dma_addr;
2088 m_size = buf_p->size;
2090 encr_offset = ENCR_OFFSET(d_offs) / 8;
2091 auth_offset = AUTH_OFFSET(d_offs) / 8;
2092 encr_data_len = ENCR_DLEN(d_lens);
2093 auth_data_len = AUTH_DLEN(d_lens);
2095 cpt_ctx = params->ctx_buf.vaddr;
2096 flags = cpt_ctx->zsk_flags;
2097 mac_len = cpt_ctx->mac_len;
2100 iv_s = params->iv_buf;
2102 iv_s = params->auth_iv_buf;
2104 dir = iv_s[8] & 0x1;
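/*
 * The direction bit rides in the 9th byte of the IV buffer; it is not
 * sent with the 8-byte IV itself but folded into the minor opcode
 * below (dir << 4).
 */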
2107 * Reserve the space that follows the app data for the completion code
2108 * and alternate completion code, so they share the app data's cache line
2110 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2111 m_dma += COMPLETION_CODE_SIZE;
2112 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2115 c_vaddr = (uint8_t *)m_vaddr + size;
2116 c_dma = m_dma + size;
2117 size += sizeof(cpt_res_s_t);
2119 m_vaddr = (uint8_t *)m_vaddr + size;
2123 /* Reserve memory for cpt request info */
2126 size = sizeof(struct cpt_request_info);
2127 m_vaddr = (uint8_t *)m_vaddr + size;
2131 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2133 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2134 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2135 (dir << 4) | (0 << 3) | (flags & 0x7));
2138 * GP op header, lengths are expected in bits.
2141 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
2142 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
2143 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
2145 /* consider iv len */
2147 encr_offset += iv_len;
2148 auth_offset += iv_len;
2151 /* save space for offset ctrl and iv */
2152 offset_vaddr = m_vaddr;
2155 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2156 m_dma += OFF_CTRL_LEN + iv_len;
2157 m_size -= OFF_CTRL_LEN + iv_len;
2159 /* DPTR has SG list */
2160 in_buffer = m_vaddr;
2163 ((uint16_t *)in_buffer)[0] = 0;
2164 ((uint16_t *)in_buffer)[1] = 0;
2166 /* TODO Add error check if space will be sufficient */
2167 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2174 /* Offset control word followed by iv */
2177 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2178 outputlen = inputlen;
2179 /* iv offset is 0 */
2180 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2182 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2183 outputlen = mac_len;
2184 /* iv offset is 0 */
2185 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2188 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2191 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2192 memcpy(iv_d, iv_s, iv_len);
2195 size = inputlen - iv_len;
2197 i = fill_sg_comp_from_iov(gather_comp, i,
2201 if (unlikely(size)) {
2202 CPT_LOG_DP_ERR("Insufficient buffer space,"
2203 " size %d needed", size);
2207 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2208 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2211 * Output Scatter List
2215 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2218 /* IV in SLIST only for F8 */
2224 i = fill_sg_comp(scatter_comp, i,
2225 offset_dma + OFF_CTRL_LEN,
2229 /* Add output data */
2230 if (req_flags & VALID_MAC_BUF) {
2231 size = outputlen - iv_len - mac_len;
2233 i = fill_sg_comp_from_iov(scatter_comp, i,
2237 if (unlikely(size)) {
2238 CPT_LOG_DP_ERR("Insufficient buffer space,"
2239 " size %d needed", size);
2246 i = fill_sg_comp_from_buf(scatter_comp, i,
2250 /* Output including mac */
2251 size = outputlen - iv_len;
2253 i = fill_sg_comp_from_iov(scatter_comp, i,
2257 if (unlikely(size)) {
2258 CPT_LOG_DP_ERR("Insufficient buffer space,"
2259 " size %d needed", size);
2264 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2265 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2267 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2269 /* This is the DPTR len in case of SG mode */
2270 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2272 m_vaddr = (uint8_t *)m_vaddr + size;
2276 /* cpt alternate completion address saved earlier */
2277 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2278 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2279 rptr_dma = c_dma - 8;
2281 req->ist.ei1 = dptr_dma;
2282 req->ist.ei2 = rptr_dma;
2284 /* First 16-bit swap then 64-bit swap */
2285 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2286 * to eliminate all the swapping
2288 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2292 vq_cmd_w3.s.grp = 0;
2293 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2294 offsetof(struct cpt_ctx, k_ctx);
2296 /* 16 byte aligned cpt res address */
2297 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2298 *req->completion_addr = COMPLETION_CODE_INIT;
2299 req->comp_baddr = c_dma;
2301 /* Fill microcode part of instruction */
2302 req->ist.ei0 = vq_cmd_w0.u64;
2303 req->ist.ei3 = vq_cmd_w3.u64;
2311 static __rte_always_inline void
2312 cpt_kasumi_dec_prep(uint64_t d_offs,
2314 fc_params_t *params,
2319 int32_t inputlen = 0, outputlen;
2320 struct cpt_ctx *cpt_ctx;
2321 uint8_t i = 0, iv_len = 8;
2322 struct cpt_request_info *req;
2324 uint32_t encr_offset;
2325 uint32_t encr_data_len;
2328 void *m_vaddr, *c_vaddr;
2329 uint64_t m_dma, c_dma;
2330 uint64_t *offset_vaddr, offset_dma;
2331 vq_cmd_word0_t vq_cmd_w0;
2332 vq_cmd_word3_t vq_cmd_w3;
2333 opcode_info_t opcode;
2335 uint32_t g_size_bytes, s_size_bytes;
2336 uint64_t dptr_dma, rptr_dma;
2337 sg_comp_t *gather_comp;
2338 sg_comp_t *scatter_comp;
2340 buf_p = &params->meta_buf;
2341 m_vaddr = buf_p->vaddr;
2342 m_dma = buf_p->dma_addr;
2343 m_size = buf_p->size;
2345 encr_offset = ENCR_OFFSET(d_offs) / 8;
2346 encr_data_len = ENCR_DLEN(d_lens);
2348 cpt_ctx = params->ctx_buf.vaddr;
2349 flags = cpt_ctx->zsk_flags;
2351 * Reserve the space that follows the app data for the completion code
2352 * and alternate completion code, so they share the app data's cache line
2354 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2355 m_dma += COMPLETION_CODE_SIZE;
2356 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2359 c_vaddr = (uint8_t *)m_vaddr + size;
2360 c_dma = m_dma + size;
2361 size += sizeof(cpt_res_s_t);
2363 m_vaddr = (uint8_t *)m_vaddr + size;
2367 /* Reserve memory for cpt request info */
2370 size = sizeof(struct cpt_request_info);
2371 m_vaddr = (uint8_t *)m_vaddr + size;
2375 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2377 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2378 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2379 (dir << 4) | (0 << 3) | (flags & 0x7));
2382 * GP op header, lengths are expected in bits.
2385 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
2386 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
2388 /* consider iv len */
2389 encr_offset += iv_len;
2391 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2392 outputlen = inputlen;
2394 /* save space for offset ctrl & iv */
2395 offset_vaddr = m_vaddr;
2398 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2399 m_dma += OFF_CTRL_LEN + iv_len;
2400 m_size -= OFF_CTRL_LEN + iv_len;
2402 /* DPTR has SG list */
2403 in_buffer = m_vaddr;
2406 ((uint16_t *)in_buffer)[0] = 0;
2407 ((uint16_t *)in_buffer)[1] = 0;
2409 /* TODO Add error check if space will be sufficient */
2410 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2417 /* Offset control word followed by iv */
2418 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2420 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2423 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2424 params->iv_buf, iv_len);
2426 /* Add input data */
2427 size = inputlen - iv_len;
2429 i = fill_sg_comp_from_iov(gather_comp, i,
2432 if (unlikely(size)) {
2433 CPT_LOG_DP_ERR("Insufficient buffer space,"
2434 " size %d needed", size);
2438 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2439 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2442 * Output Scatter List
2446 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2449 i = fill_sg_comp(scatter_comp, i,
2450 offset_dma + OFF_CTRL_LEN,
2453 /* Add output data */
2454 size = outputlen - iv_len;
2456 i = fill_sg_comp_from_iov(scatter_comp, i,
2459 if (unlikely(size)) {
2460 CPT_LOG_DP_ERR("Insufficient buffer space,"
2461 " size %d needed", size);
2465 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2466 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2468 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2470 /* This is the DPTR len in case of SG mode */
2471 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2473 m_vaddr = (uint8_t *)m_vaddr + size;
2477 /* cpt alternate completion address saved earlier */
2478 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2479 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2480 rptr_dma = c_dma - 8;
2482 req->ist.ei1 = dptr_dma;
2483 req->ist.ei2 = rptr_dma;
2485 /* First 16-bit swap then 64-bit swap */
2486 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2487 * to eliminate all the swapping
2489 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2493 vq_cmd_w3.s.grp = 0;
2494 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2495 offsetof(struct cpt_ctx, k_ctx);
2497 /* 16 byte aligned cpt res address */
2498 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2499 *req->completion_addr = COMPLETION_CODE_INIT;
2500 req->comp_baddr = c_dma;
2502 /* Fill microcode part of instruction */
2503 req->ist.ei0 = vq_cmd_w0.u64;
2504 req->ist.ei3 = vq_cmd_w3.u64;
2512 static __rte_always_inline void *
2513 cpt_fc_dec_hmac_prep(uint32_t flags,
2516 fc_params_t *fc_params,
2519 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2521 void *prep_req = NULL;
2523 fc_type = ctx->fc_type;
2525 if (likely(fc_type == FC_GEN)) {
2526 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2528 } else if (fc_type == ZUC_SNOW3G) {
2529 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2531 } else if (fc_type == KASUMI) {
2532 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2536 * For the AUTH_ONLY case,
2537 * the MC only supports digest generation; verification
2538 * must be done in software by memcmp()
2544 static __rte_always_inline void *__hot
2545 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2546 fc_params_t *fc_params, void *op)
2548 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2550 void *prep_req = NULL;
2552 fc_type = ctx->fc_type;
2554 /* Common API for the rest of the ops */
2555 if (likely(fc_type == FC_GEN)) {
2556 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2558 } else if (fc_type == ZUC_SNOW3G) {
2559 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2561 } else if (fc_type == KASUMI) {
2562 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2564 } else if (fc_type == HASH_HMAC) {
2565 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);

static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
{
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		uint32_t keyx[4];

		if (key_len != 16)
			return -1;
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
			return -1;
		/* For ZUC/SNOW3G/Kasumi */
		switch (type) {
		case SNOW3G_UIA2:
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case ZUC_EIA3:
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_ECB:
			/* Kasumi ECB mode */
			cpt_ctx->k_ecb = 1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		case KASUMI_F9_CBC:
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			break;
		default:
			return -1;
		}
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
		return 0;
	}

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	}

	ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		CPT_P_ENC_CTRL(fctx).enc_cipher = 0;

	CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
	CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;

	if (key_len) {
		cpt_ctx->hmac = 1;
		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
		memcpy(fctx->hmac.opad, key, key_len);
		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
	}
	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
	return 0;
}
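
/*
 * Illustrative sketch (not part of the driver): configuring a context for
 * HMAC-SHA1 with a 20 byte key and a 96-bit truncated digest. Note that
 * cpt_fc_auth_set_key() only seeds fctx->hmac.opad with the raw key and
 * sets auth_input_type; deriving the real ipad/opad state is left to the
 * microcode (an assumption based on the handling above).
 */
static __rte_always_inline int
example_setup_hmac_sha1(struct cpt_ctx *ctx, uint8_t *key)
{
	/* key_len = 20, mac_len = 12 for a truncated 96-bit tag */
	return cpt_fc_auth_set_key(ctx, SHA1_TYPE, key, 20, 12);
}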

static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_gcm = 0;
	void *ctx;

	aead_form = &xform->aead;

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
	    aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
		   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	} else {
		CPT_LOG_DP_ERR("Unknown aead operation");
		return -1;
	}

	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       aead_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       aead_form->algo);
		return -1;
	}

	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;
	ctx = SESS_PRIV(sess);

	cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			    aead_form->key.length, NULL);

	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);

	return 0;
}
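
/*
 * Illustrative sketch (not part of the driver): the kind of AEAD xform a
 * caller would hand to fill_sess_aead() for AES-128-GCM. All lengths and
 * offsets below are example values; note that fill_fc_params() later
 * consumes the first 4 bytes of the IV buffer as the salt.
 */
static __rte_always_inline int
example_fill_gcm_session(struct cpt_sess_misc *sess, uint8_t *key)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.next = NULL;
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform.aead.key.data = key;
	xform.aead.key.length = 16;	/* AES-128 */
	xform.aead.digest_length = 16;	/* full GCM tag */
	xform.aead.aad_length = 8;
	xform.aead.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op);
	xform.aead.iv.length = 16;	/* 4 byte salt + 12 byte nonce */

	return fill_sess_aead(&xform, sess);
}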

static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return -1;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
	else {
		CPT_LOG_DP_ERR("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
			       c_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
			    c_form->key.length, NULL);

	return 0;
}
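
/*
 * Illustrative sketch (not part of the driver): a cipher-only xform for
 * SNOW3G UEA2 as accepted by fill_sess_cipher(). The ZS_EA zsk_flag set
 * in that path indicates a wireless algorithm whose offsets are expressed
 * in bits.
 */
static __rte_always_inline int
example_fill_snow3g_cipher_session(struct cpt_sess_misc *sess, uint8_t *key)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.next = NULL;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
	xform.cipher.key.data = key;
	xform.cipher.key.length = 16;	/* UEA2 uses a 128-bit CK */
	xform.cipher.iv.offset = sizeof(struct rte_crypto_op) +
				 sizeof(struct rte_crypto_sym_op);
	xform.cipher.iv.length = 16;

	return fill_sess_cipher(&xform, sess);
}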

static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is too large");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
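
/*
 * Illustrative sketch (not part of the driver): an auth-only xform for
 * SNOW3G UIA2 as accepted by fill_sess_auth(). The ZS_IA zsk_flag makes
 * the session record the auth IV offset/length set below.
 */
static __rte_always_inline int
example_fill_snow3g_auth_session(struct cpt_sess_misc *sess, uint8_t *key)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform.next = NULL;
	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
	xform.auth.key.data = key;
	xform.auth.key.length = 16;	/* 128-bit IK */
	xform.auth.digest_length = 4;	/* 32-bit MAC-I */
	xform.auth.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op);
	xform.auth.iv.length = 16;

	return fill_sess_auth(&xform, sess);
}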

static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0;
	void *ctx;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;
	ctx = SESS_PRIV(sess);

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}

static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is an mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}

/**
 * free_op_meta - free meta buffer back to mempool.
 * @param mdata: pointer to the meta buffer (bit 0 set when it was carved
 * out of mbuf tailroom by alloc_op_meta()).
 * @param cpt_meta_pool: mempool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
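
/*
 * Illustrative sketch (not part of the driver): the tag-bit convention
 * shared by alloc_op_meta() and free_op_meta(). When the meta buffer is
 * carved out of mbuf tailroom, bit 0 of the returned pointer is set so
 * free_op_meta() skips rte_mempool_put(); callers recover the usable
 * address by masking the bit off, exactly as fill_fc_params() does below.
 */
static __rte_always_inline void
example_meta_roundtrip(struct rte_mbuf *m, buf_ptr_t *buf, int32_t len,
		       struct rte_mempool *pool)
{
	void *mdata;
	uintptr_t *op;

	mdata = alloc_op_meta(m, buf, len, pool);
	if (mdata == NULL)
		return;

	/* Usable buffer starts at the tag-masked address */
	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;	/* keep the tagged value for free */

	free_op_meta(mdata, pool);	/* no-op when tailroom backed */
}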

static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
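
/*
 * Illustrative sketch (not part of the driver): gathering an mbuf chain
 * into an iovec, using the same char-array backing trick that
 * fill_fc_params() uses below. Returns the number of collected segments,
 * or -1 on failure.
 */
static __rte_always_inline int
example_gather_pkt(struct rte_mbuf *pkt, uint32_t off)
{
	char raw[SRC_IOV_SIZE];
	iov_ptr_t *iov = (iov_ptr_t *)raw;

	/* Collect all segments starting 'off' bytes into the chain */
	if (prepare_iov_from_pkt(pkt, iov, off))
		return -1;

	return iov->buf_cnt;
}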

static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
	uint8_t zsk_flag = sess_misc->zsk_flag;
	uint8_t aes_gcm = sess_misc->aes_gcm;
	uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (zsk_flag == K_F9) {
			CPT_LOG_DP_ERR("Should not reach here for "
				       "kasumi F9");
		}
		if (zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;
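
		/*
		 * Worked example of the packing above (illustrative
		 * numbers): cipher offset 16, cipher length 64, auth
		 * offset 0, auth length 88 yield
		 *   d_offs = (16 << 16) | 0     = 0x100000
		 *   d_lens = (64ULL << 32) | 88 = 0x4000000058
		 * so the microcode receives both ranges packed into one
		 * 64-bit word each: cipher in the upper half, auth in
		 * the lower half.
		 */
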
		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the API for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
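
/*
 * Illustrative sketch (not the PMD's actual dequeue path): how the op[]
 * metadata words written by fill_fc_params()/fill_digest_params() tie in
 * to compl_auth_verify(). op[2]/op[3] are non-zero only when a generated
 * MAC still has to be compared against the expected digest in software.
 */
static __rte_always_inline void
example_finish_auth_verify(uintptr_t *op)
{
	struct rte_crypto_op *cop = (struct rte_crypto_op *)op[1];

	if (op[2] && op[3])
		compl_auth_verify(cop, (uint8_t *)op[2], (uint64_t)op[3]);
}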

static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}

/*
 * Scan backwards from the end of a KASUMI F9 input to recover the
 * direction bit and the message length in bits: the input is padded with
 * a single '1' bit followed by zeros, and the bit immediately preceding
 * that marker is the direction bit.
 */
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
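
/*
 * Worked example (illustrative): with counter_num_bytes = 8 and the last
 * non-zero byte src[7] == 0x40 (binary 0100 0000), rte_bsf32() returns
 * pos = 6 for the padding '1', the direction bit is (0x40 >> 7) & 1 = 0,
 * and the length is 7 * 8 + (8 - (6 + 2)) = 56 bits. Had src[7] been
 * 0x80 (pos == 7), the direction bit would instead be the LSB of src[6]
 * and the length 7 * 8 - 1 = 55 bits.
 */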

/*
 * This handles all auth-only cases except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint8_t zsk_flag = sess->zsk_flag;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits, we
		 * will send pass through even for the auth only case and
		 * let the microcode handle the offsets.
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;
			uint32_t counter_num_bytes;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is KASUMI F9: take the direction bit from
			 * the end of the source buffer.
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			counter_num_bytes = num_bytes;
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						counter_num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store direction at the end of the auth IV */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/* Digest to be generated in a separate buffer */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %dB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the API for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */