/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
 * This file defines the functions that interface with the microcode spec.
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
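/*
 * Note: these 32 bytes match the sixteen 15-bit D constants from the ZUC
 * key-loading specification (D0 = 0x44D7, D1 = 0x26BC, ..., D15 = 0x47AC),
 * stored as big-endian byte pairs; they are copied verbatim into
 * zs_ctx.zuc_const by the ZUC key-setup paths below.
 */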
static __rte_always_inline int
cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
	 * Microcode only supports the following combination.
	 * Encryption followed by authentication
	 * Authentication followed by decryption
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
		/* Unsupported as of now by microcode */
		CPT_LOG_DP_ERR("Unsupported combination");
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
		/* For GMAC auth there is no cipher operation */
		if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
		    xform->next->auth.algo !=
		    RTE_CRYPTO_AUTH_AES_GMAC) {
			/* Unsupported as of now by microcode */
			CPT_LOG_DP_ERR("Unsupported combination");
static __rte_always_inline void
gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
	for (i = 0; i < 4; i++) {
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
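/*
 * Illustrative example (not from the spec): for ck = 00 01 02 03 04 05 06 07
 * 08 09 0a 0b 0c 0d 0e 0f, the i = 0 iteration packs ck[0..3] into the value
 * 0x00010203 and stores it in keyx[3]; rte_cpu_to_be_32() then lays the bytes
 * out as 00 01 02 03 in memory regardless of host endianness. The result is
 * the confidentiality key as four big-endian words in reversed word order.
 */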
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
	struct cpt_ctx *cpt_ctx = ctx;
	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
static __rte_always_inline int
cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		key_len = key_len / 2;
		if (unlikely(key_len == CPT_BYTE_24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(cpt_ctx->hash_type))
		fc_type = ZUC_SNOW3G;
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(cpt_ctx->hash_type))
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
	cpt_ctx->enc_cipher = 0;
	CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
	mc_aes_type_t aes_key_type = 0;
		aes_key_type = AES_128_BIT;
		aes_key_type = AES_192_BIT;
		aes_key_type = AES_256_BIT;
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
	CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
	gen_key_snow3g(key, keyx);
	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
	cpt_ctx->fc_type = ZUC_SNOW3G;
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
	cpt_ctx->fc_type = ZUC_SNOW3G;
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
	cpt_ctx->fc_type = KASUMI;
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
	cpt_ctx->fc_type = KASUMI;
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
		    uint16_t key_len, uint8_t *salt)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;
	/* Validate key before proceeding */
	fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
	if (unlikely(fc_type == -1))
	if (fc_type == FC_GEN) {
		cpt_ctx->fc_type = FC_GEN;
		ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
		*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
		 * We need to always say the IV is from DPTR as the user can
		 * sometimes override the IV per operation.
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g., if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
		/* Skipping the first 8B as it will be copied
		 * in the regular code flow
		memcpy(fctx->enc.encr_key + key_len, key, key_len);
		memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);
		/* For DES3_ECB, the IV needs to come from CTX. */
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Even though the IV source is from DPTR,
		 * the AES-GCM salt is taken from CTX
		memcpy(fctx->enc.encr_iv, salt, 4);
		/* Assuming it was just salt update
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
	/* Only for FC_GEN case */
	/* For GMAC auth, cipher must be NULL */
	if (cpt_ctx->hash_type != GMAC_TYPE)
		CPT_P_ENC_CTRL(fctx).enc_cipher = type;
	memcpy(fctx->enc.encr_key, key, key_len);
	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
	cpt_ctx->enc_cipher = type;
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     phys_addr_t dma_addr,
	sg_comp_t *to = &list[i >> 2];
	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
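/*
 * A note on the layout (a reading of this code, not of the SG spec): each
 * sg_comp_t packs up to four entries, i.e. four 16-bit big-endian lengths
 * followed by four 64-bit big-endian pointers, so entry i lands in block
 * list[i >> 2] at slot i % 4. For example, i = 5 fills the second slot of
 * list[1].
 */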
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;
	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
 * This fills the SGIO list expected by the MC from the IOV given by the
 * user.
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize - extra_len;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		sg_comp_t *to = &list[i >> 2];
		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?
		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at the given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;
			e_len = extra_offset;
			to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			/* Insert extra data ptr */
				rte_cpu_to_be_16(extra_buf->size);
				rte_cpu_to_be_64(extra_buf->dma_addr);
			/* size already decremented by extra len */
			/* insert the rest of the data */
			to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
			to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
			extra_offset -= size;
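/*
 * Illustrative behaviour of the extra_buf handling above (assumed values):
 * with a single 100B source buffer, extra_offset = 40 and a 16B extra_buf,
 * the gather list comes out as [40B head][16B extra_buf][60B tail], i.e. the
 * component covering the offset is split and extra_buf is spliced in
 * between. extra_len is not counted against *psize, since size was reduced
 * by it up front.
 */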
static __rte_always_inline int
cpt_digest_gen_prep(uint32_t flags,
		    digest_params_t *params,
	struct cpt_request_info *req;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;
	opcode_info_t opcode;
	if (!params || !params->ctx_buf.vaddr)
		return ERR_BAD_INPUT_ARG;
	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;
	if (!meta_p->vaddr || !meta_p->dma_addr)
		return ERR_BAD_INPUT_ARG;
	if (meta_p->size < sizeof(struct cpt_request_info))
		return ERR_BAD_INPUT_ARG;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	m_size = meta_p->size;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
			rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
	/* Only the NULL-auth case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		opcode.s.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		opcode.s.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;
	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
	/* DPTR has SG list */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO: Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		uint64_t k_dma = params->ctx_buf.dma_addr +
			offsetof(struct cpt_ctx, auth_key);
		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
			return ERR_BAD_INPUT_ARG;
	 * Looks like we need to support a zero-data
	 * gather ptr in case of hash & hmac
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	if (flags & VALID_MAC_BUF) {
		if (params->mac_buf.size < mac_len)
			return ERR_BAD_INPUT_ARG;
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by"
			return ERR_BAD_INPUT_ARG;
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
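	/*
	 * A note on the completion scheme (inferred from this code, not from
	 * the CPT HRM): the 8 bytes just below c_vaddr are primed with the
	 * complement of COMPLETION_CODE_INIT and RPTR points at them, so a
	 * poller can tell a hardware-written result apart from the initial
	 * value.
	 */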
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	m_size = meta_p->size;
	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
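	/*
	 * d_offs and d_lens pack the cipher and auth offsets/lengths into
	 * single 64-bit words; the ENCR_*()/AUTH_*() helpers used above
	 * extract the individual fields. Their exact bit positions live with
	 * the macro definitions in the common CPT headers, not here.
	 */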
	if (unlikely(flags & VALID_AAD_BUF)) {
		 * We don't support both AAD
		 * and auth data separately
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	if (hash_type == GMAC_TYPE)
	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);
	if (unlikely(flags & VALID_AAD_BUF)) {
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as a separate pointer and not as an offset,
		 * this is a special case as we need to fragment the input data
		 * into passthrough + encr_data and then insert AAD in between.
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
	opcode.s.major = CPT_MAJOR_OP_FC;
	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
	if (unlikely(hash_type == GMAC_TYPE)) {
		encr_offset = auth_dlen;
	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
		outputlen = enc_dlen + mac_len;
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
	 * On 83XX we have the limitation that the
	 * IV & offset control word are not part of the instruction
	 * and need to be part of the data buffer, so we check whether
	 * head room is there and only then do the direct mode processing
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so that we get to do
		 * DIRECT MODE within that limitation
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
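		/*
		 * Worked example of the offset control word as packed above
		 * (illustrative values): with encr_offset = 24, iv_offset = 8
		 * and auth_offset = 0, the host value is
		 * (24 << 16) | (8 << 8) | 0 = 0x00180800, written big endian
		 * in front of the IV so the microcode sees the offsets
		 * byte-for-byte.
		 */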
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;
		m_vaddr = (uint8_t *)m_vaddr + size;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		/* DPTR has SG list */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		size = inputlen - iv_len;
			uint32_t aad_offset = aad_len ? passthrough_len : 0;
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
				i = fill_sg_comp_from_iov(gather_comp, i,
							  aad_buf, aad_offset);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
		/* output data or output data + digest */
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
					return ERR_BAD_INPUT_ARG;
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &fc_params->mac_buf);
			/* Output including mac */
			size = outputlen - iv_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
					return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len, m_size;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	opcode_info_t opcode;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	m_size = meta_p->size;
	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		 * We don't support both AAD
		 * and auth data separately
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	if (hash_type == GMAC_TYPE)
	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);
	if (unlikely(flags & VALID_AAD_BUF)) {
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as a separate pointer and not as an offset,
		 * this is a special case as we need to fragment the input data
		 * into passthrough + encr_data and then insert AAD in between.
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_FC;
	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;
	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	if (hash_type == GMAC_TYPE)
		encr_offset = inputlen;
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
	 * On 83XX we have the limitation that the
	 * IV & offset control word are not part of the instruction
	 * and need to be part of the data buffer, so we check whether
	 * head room is there and only then do the direct mode processing
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so that we get to do
		 * DIRECT MODE within that limitation
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* since this is decryption,
		 * don't touch the content of
		 * the alternate completion-code space as it contains
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;
		m_vaddr = (uint8_t *)m_vaddr + size;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			/* input data only */
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;
				i = fill_sg_comp_from_iov(gather_comp,
					return ERR_BAD_INPUT_ARG;
			i = fill_sg_comp_from_buf(gather_comp, i,
						  &fc_params->mac_buf);
			/* input data + mac */
			size = inputlen - iv_len;
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;
				if (!fc_params->src_iov)
					return ERR_BAD_INPUT_ARG;
				i = fill_sg_comp_from_iov(
					return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
		/* Add output data */
		size = outputlen - iv_len;
		if (unlikely(flags & SINGLE_BUF_INPLACE)) {
			/* handle single buffer here */
			i = fill_sg_comp_from_buf_min(scatter_comp, i,
			uint32_t aad_offset = aad_len ?
				passthrough_len : 0;
			if (!fc_params->dst_iov)
				return ERR_BAD_INPUT_ARG;
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  fc_params->dst_iov, 0,
			return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16, m_size;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
		auth_data_len = AUTH_DLEN(d_lens);
		auth_offset = AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;
		/* consider iv len */
		auth_offset += iv_len;
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
		encr_data_len = ENCR_DLEN(d_lens);
		encr_offset = ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	iv_s = (flags == 0x1) ? params->auth_iv_buf :
	 * DPDK seems to provide it in the form IV3 IV2 IV1 IV0
	 * and big endian; MC needs it as IV0 IV1 IV2 IV3
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
	 * On 83XX we have the limitation that the
	 * IV & offset control word are not part of the instruction
	 * and need to be part of the data buffer, so we check whether
	 * head room is there and only then do the direct mode processing
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so that we get to do
		 * DIRECT MODE within that limitation
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);
		*offset_vaddr = offset_ctrl;
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* save space for iv */
		offset_vaddr = m_vaddr;
		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;
		m_size -= OFF_CTRL_LEN + iv_len;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;
		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);
		size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
			return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		/* IV in SLIST only for EEA3 & UEA2 */
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);
		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
				return ERR_BAD_INPUT_ARG;
			i = fill_sg_comp_from_buf(scatter_comp, i,
			/* Output including mac */
			size = outputlen - iv_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
				return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));
	/* consider iv len */
	encr_offset += iv_len;
	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	iv_s = params->iv_buf;
	 * DPDK seems to provide it in the form IV3 IV2 IV1 IV0
	 * and big endian; MC needs it as IV0 IV1 IV2 IV3
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	 * On 83XX we have the limitation that the
	 * IV & offset control word are not part of the instruction
	 * and need to be part of the data buffer, so we check whether
	 * head room is there and only then do the direct mode processing
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so that we get to do
		 * DIRECT MODE within that limitation
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;
		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;
		m_size -= OFF_CTRL_LEN + iv_len;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word */
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);
		/* Add input data */
		size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
			return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		 * Output Scatter List
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
		/* Add output data */
		size = outputlen - iv_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
			return ERR_BAD_INPUT_ARG;
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
		iv_s = params->iv_buf;
		iv_s = params->auth_iv_buf;
	dir = iv_s[8] & 0x1;
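	/*
	 * As read from the code above: for the auth-only path the IV comes
	 * from auth_iv_buf, and the KASUMI direction bit is taken from bit 0
	 * of IV byte 8 (an observation about this code, not a claim about
	 * the F8/F9 specs).
	 */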
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));
	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
	/* consider iv len */
	encr_offset += iv_len;
	auth_offset += iv_len;
	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;
	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	m_size -= OFF_CTRL_LEN + iv_len;
	/* DPTR has SG list */
	in_buffer = m_vaddr;
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO: Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
	/* Offset control word followed by iv */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);
	size = inputlen - iv_len;
	i = fill_sg_comp_from_iov(gather_comp, i,
		return ERR_BAD_INPUT_ARG;
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	 * Output Scatter List
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	/* IV in SLIST only for F8 */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
			return ERR_BAD_INPUT_ARG;
		i = fill_sg_comp_from_buf(scatter_comp, i,
		/* Output including mac */
		size = outputlen - iv_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
			return ERR_BAD_INPUT_ARG;
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	 * Save initial space that followed app data for the completion code and
	 * alternate completion code to fall in the same cache line as app data
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));
	 * GP op header, lengths are expected in bits.
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
	/* consider iv len */
	encr_offset += iv_len;
	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;
	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	m_size -= OFF_CTRL_LEN + iv_len;
	/* DPTR has SG list */
	in_buffer = m_vaddr;
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO: Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);
	/* Add input data */
	size = inputlen - iv_len;
	i = fill_sg_comp_from_iov(gather_comp, i,
		return ERR_BAD_INPUT_ARG;
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	 * Output Scatter List
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,
	/* Add output data */
	size = outputlen - iv_len;
	i = fill_sg_comp_from_iov(scatter_comp, i,
		return ERR_BAD_INPUT_ARG;
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     fc_params_t *fc_params,
		     void *op, int *ret_val)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;
	fc_type = ctx->fc_type;
	if (likely(fc_type == FC_GEN)) {
		ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
					fc_params, op, &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens,
					      fc_params, op, &prep_req);
	} else if (fc_type == KASUMI) {
		ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op,
	 * For the AUTH_ONLY case,
	 * MC only supports digest generation; verification
	 * should be done in software via memcmp()
	if (unlikely(!prep_req))
static __rte_always_inline void *__hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op, int *ret_val)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;
	fc_type = ctx->fc_type;
	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
					fc_params, op, &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens,
					      fc_params, op, &prep_req);
	} else if (fc_type == KASUMI) {
		ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
					  fc_params, op, &prep_req);
	} else if (fc_type == HASH_HMAC) {
		ret = cpt_digest_gen_prep(flags, d_lens, fc_params, op,
	if (unlikely(!prep_req))
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;
	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
		/* For ZUC/SNOW3G/Kasumi */
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			/* Kasumi ECB mode */
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
	CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
	CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
		memcpy(fctx->hmac.opad, key, key_len);
		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
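		/*
		 * Note (a reading of this code, not the microcode spec): the
		 * raw key is copied into the opad field and auth_input_type
		 * is set, which suggests the microcode derives the actual
		 * HMAC ipad/opad from the raw key itself.
		 */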
2618 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
2622 static __rte_always_inline int
2623 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2624 struct cpt_sess_misc *sess)
2626 struct rte_crypto_aead_xform *aead_form;
2627 cipher_type_t enc_type = 0; /* NULL Cipher type */
2628 auth_type_t auth_type = 0; /* NULL Auth type */
2629 uint32_t cipher_key_len = 0;
2630 uint8_t zsk_flag = 0, aes_gcm = 0;
2631 aead_form = &xform->aead;
2634 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2635 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2636 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2637 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2638 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2639 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2640 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2641 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2643 CPT_LOG_DP_ERR("Unknown cipher operation\n");
2646 switch (aead_form->algo) {
2647 case RTE_CRYPTO_AEAD_AES_GCM:
2649 cipher_key_len = 16;
2652 case RTE_CRYPTO_AEAD_AES_CCM:
2653 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2657 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2661 if (aead_form->key.length < cipher_key_len) {
2662 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
2666 sess->zsk_flag = zsk_flag;
2667 sess->aes_gcm = aes_gcm;
2668 sess->mac_len = aead_form->digest_length;
2669 sess->iv_offset = aead_form->iv.offset;
2670 sess->iv_length = aead_form->iv.length;
2671 sess->aad_length = aead_form->aad_length;
	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
2674 cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2675 aead_form->key.length, NULL);
2677 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
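/*
 * Populate the session from a cipher-only xform; maps the
 * rte_crypto algo onto the microcode cipher type and checks the
 * minimum key length for it.
 */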
2682 static __rte_always_inline int
2683 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2684 struct cpt_sess_misc *sess)
2686 struct rte_crypto_cipher_xform *c_form;
2687 cipher_type_t enc_type = 0; /* NULL Cipher type */
2688 uint32_t cipher_key_len = 0;
2689 uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
2691 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
2694 c_form = &xform->cipher;
2696 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2697 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2698 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2699 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		CPT_LOG_DP_ERR("Unknown cipher operation");
2705 switch (c_form->algo) {
2706 case RTE_CRYPTO_CIPHER_AES_CBC:
2708 cipher_key_len = 16;
2710 case RTE_CRYPTO_CIPHER_3DES_CBC:
2711 enc_type = DES3_CBC;
2712 cipher_key_len = 24;
2714 case RTE_CRYPTO_CIPHER_DES_CBC:
2715 /* DES is implemented using 3DES in hardware */
2716 enc_type = DES3_CBC;
2719 case RTE_CRYPTO_CIPHER_AES_CTR:
2721 cipher_key_len = 16;
2724 case RTE_CRYPTO_CIPHER_NULL:
2728 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2729 enc_type = KASUMI_F8_ECB;
2730 cipher_key_len = 16;
2733 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2734 enc_type = SNOW3G_UEA2;
2735 cipher_key_len = 16;
2738 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2739 enc_type = ZUC_EEA3;
2740 cipher_key_len = 16;
2743 case RTE_CRYPTO_CIPHER_AES_XTS:
2745 cipher_key_len = 16;
2747 case RTE_CRYPTO_CIPHER_3DES_ECB:
2748 enc_type = DES3_ECB;
2749 cipher_key_len = 24;
2751 case RTE_CRYPTO_CIPHER_AES_ECB:
2753 cipher_key_len = 16;
2755 case RTE_CRYPTO_CIPHER_3DES_CTR:
2756 case RTE_CRYPTO_CIPHER_AES_F8:
2757 case RTE_CRYPTO_CIPHER_ARC4:
2758 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2762 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2767 if (c_form->key.length < cipher_key_len) {
2768 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2769 (unsigned long) c_form->key.length);
2773 sess->zsk_flag = zsk_flag;
2774 sess->aes_gcm = aes_gcm;
2775 sess->aes_ctr = aes_ctr;
2776 sess->iv_offset = c_form->iv.offset;
2777 sess->iv_length = c_form->iv.length;
2778 sess->is_null = is_null;
2780 cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
2781 c_form->key.length, NULL);
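/*
 * Populate the session from an auth-only xform (hashes, HMAC,
 * ZUC/SNOW3G/KASUMI integrity); AES-GMAC is routed to
 * fill_sess_gmac() instead.
 */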
2786 static __rte_always_inline int
2787 fill_sess_auth(struct rte_crypto_sym_xform *xform,
2788 struct cpt_sess_misc *sess)
2790 struct rte_crypto_auth_xform *a_form;
2791 auth_type_t auth_type = 0; /* NULL Auth type */
2792 uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
2794 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
2797 a_form = &xform->auth;
2799 if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2800 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2801 else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2802 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2804 CPT_LOG_DP_ERR("Unknown auth operation");
2808 if (a_form->key.length > 64) {
2809 CPT_LOG_DP_ERR("Auth key length is big");
2813 switch (a_form->algo) {
2814 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2816 case RTE_CRYPTO_AUTH_SHA1:
2817 auth_type = SHA1_TYPE;
2819 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2820 case RTE_CRYPTO_AUTH_SHA256:
2821 auth_type = SHA2_SHA256;
2823 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2824 case RTE_CRYPTO_AUTH_SHA512:
2825 auth_type = SHA2_SHA512;
2827 case RTE_CRYPTO_AUTH_AES_GMAC:
2828 auth_type = GMAC_TYPE;
2831 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2832 case RTE_CRYPTO_AUTH_SHA224:
2833 auth_type = SHA2_SHA224;
2835 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2836 case RTE_CRYPTO_AUTH_SHA384:
2837 auth_type = SHA2_SHA384;
2839 case RTE_CRYPTO_AUTH_MD5_HMAC:
2840 case RTE_CRYPTO_AUTH_MD5:
2841 auth_type = MD5_TYPE;
2843 case RTE_CRYPTO_AUTH_KASUMI_F9:
2844 auth_type = KASUMI_F9_ECB;
2846 * Indicate that direction needs to be taken out
2851 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2852 auth_type = SNOW3G_UIA2;
2855 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2856 auth_type = ZUC_EIA3;
2859 case RTE_CRYPTO_AUTH_NULL:
2863 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2864 case RTE_CRYPTO_AUTH_AES_CMAC:
2865 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2866 CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
		CPT_LOG_DP_ERR("Crypto: Undefined hash algo %u specified",
2875 sess->zsk_flag = zsk_flag;
2876 sess->aes_gcm = aes_gcm;
2877 sess->mac_len = a_form->digest_length;
2878 sess->is_null = is_null;
2880 sess->auth_iv_offset = a_form->iv.offset;
2881 sess->auth_iv_length = a_form->iv.length;
2883 cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
2884 a_form->key.length, a_form->digest_length);
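/*
 * AES-GMAC is modeled as a GCM cipher context with a zero-length
 * auth key: the key is programmed via the cipher path, the digest
 * length via the auth path.
 */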
2892 static __rte_always_inline int
2893 fill_sess_gmac(struct rte_crypto_sym_xform *xform,
2894 struct cpt_sess_misc *sess)
2896 struct rte_crypto_auth_xform *a_form;
2897 cipher_type_t enc_type = 0; /* NULL Cipher type */
2898 auth_type_t auth_type = 0; /* NULL Auth type */
2899 uint8_t zsk_flag = 0, aes_gcm = 0;
2902 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
2905 a_form = &xform->auth;
2907 if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2908 sess->cpt_op |= CPT_OP_ENCODE;
2909 else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2910 sess->cpt_op |= CPT_OP_DECODE;
2912 CPT_LOG_DP_ERR("Unknown auth operation");
2916 switch (a_form->algo) {
2917 case RTE_CRYPTO_AUTH_AES_GMAC:
2919 auth_type = GMAC_TYPE;
2922 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2927 sess->zsk_flag = zsk_flag;
2928 sess->aes_gcm = aes_gcm;
2930 sess->iv_offset = a_form->iv.offset;
2931 sess->iv_length = a_form->iv.length;
2932 sess->mac_len = a_form->digest_length;
	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
2935 cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
2936 a_form->key.length, NULL);
2937 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
2942 static __rte_always_inline void *
2943 alloc_op_meta(struct rte_mbuf *m_src,
2946 struct rte_mempool *cpt_meta_pool)
2950 #ifndef CPT_ALWAYS_USE_SEPARATE_BUF
2951 if (likely(m_src && (m_src->nb_segs == 1))) {
2955 /* Check if tailroom is sufficient to hold meta data */
2956 tailroom = rte_pktmbuf_tailroom(m_src);
2957 if (likely(tailroom > len + 8)) {
2958 mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
2959 mphys = m_src->buf_physaddr + m_src->buf_len;
2963 buf->dma_addr = mphys;
2965 /* Indicate that this is a mbuf allocated mdata */
2966 mdata = (uint8_t *)((uint64_t)mdata | 1ull);
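		/* free_op_meta() tests this bit and skips the mempool
		 * put for tailroom-carved metadata.
		 */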
2971 RTE_SET_USED(m_src);
2974 if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2978 buf->dma_addr = rte_mempool_virt2iova(mdata);
 * free_op_meta - free metabuf back to the mempool.
 * @param mdata: pointer to the metabuf.
 * @param cpt_meta_pool: mempool the metabuf was allocated from.
2989 static __rte_always_inline void
2990 free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
2992 bool nofree = ((uintptr_t)mdata & 1ull);
2996 rte_mempool_put(cpt_meta_pool, mdata);
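/*
 * Build an SG list from an mbuf chain, skipping the first
 * start_offset bytes; callers treat a non-zero return as failure.
 */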
2999 static __rte_always_inline uint32_t
3000 prepare_iov_from_pkt(struct rte_mbuf *pkt,
3001 iov_ptr_t *iovec, uint32_t start_offset)
3004 void *seg_data = NULL;
3005 phys_addr_t seg_phys;
3006 int32_t seg_size = 0;
3013 if (!start_offset) {
3014 seg_data = rte_pktmbuf_mtod(pkt, void *);
3015 seg_phys = rte_pktmbuf_mtophys(pkt);
3016 seg_size = pkt->data_len;
3018 while (start_offset >= pkt->data_len) {
3019 start_offset -= pkt->data_len;
3023 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
3024 seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
3025 seg_size = pkt->data_len - start_offset;
3031 iovec->bufs[index].vaddr = seg_data;
3032 iovec->bufs[index].dma_addr = seg_phys;
3033 iovec->bufs[index].size = seg_size;
3037 while (unlikely(pkt != NULL)) {
3038 seg_data = rte_pktmbuf_mtod(pkt, void *);
3039 seg_phys = rte_pktmbuf_mtophys(pkt);
3040 seg_size = pkt->data_len;
3044 iovec->bufs[index].vaddr = seg_data;
3045 iovec->bufs[index].dma_addr = seg_phys;
3046 iovec->bufs[index].size = seg_size;
3053 iovec->buf_cnt = index;
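/*
 * In-place variant: a single-segment mbuf with sufficient head and
 * tailroom is flagged SINGLE_BUF_HEADTAILROOM for Direct mode;
 * anything else falls back to an SG list as in the SG path.
 */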
3057 static __rte_always_inline uint32_t
3058 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
3063 void *seg_data = NULL;
3064 phys_addr_t seg_phys;
3065 uint32_t seg_size = 0;
3068 seg_data = rte_pktmbuf_mtod(pkt, void *);
3069 seg_phys = rte_pktmbuf_mtophys(pkt);
3070 seg_size = pkt->data_len;
3073 if (likely(!pkt->next)) {
3074 uint32_t headroom, tailroom;
3076 *flags |= SINGLE_BUF_INPLACE;
3077 headroom = rte_pktmbuf_headroom(pkt);
3078 tailroom = rte_pktmbuf_tailroom(pkt);
3079 if (likely((headroom >= 24) &&
			/* In 83XX this is a prerequisite for Direct mode */
3082 *flags |= SINGLE_BUF_HEADTAILROOM;
3084 param->bufs[0].vaddr = seg_data;
3085 param->bufs[0].dma_addr = seg_phys;
3086 param->bufs[0].size = seg_size;
3089 iovec = param->src_iov;
3090 iovec->bufs[index].vaddr = seg_data;
3091 iovec->bufs[index].dma_addr = seg_phys;
3092 iovec->bufs[index].size = seg_size;
3096 while (unlikely(pkt != NULL)) {
3097 seg_data = rte_pktmbuf_mtod(pkt, void *);
3098 seg_phys = rte_pktmbuf_mtophys(pkt);
3099 seg_size = pkt->data_len;
3104 iovec->bufs[index].vaddr = seg_data;
3105 iovec->bufs[index].dma_addr = seg_phys;
3106 iovec->bufs[index].size = seg_size;
3113 iovec->buf_cnt = index;
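/*
 * Translate a symmetric crypto op into flexi-crypto params: resolve
 * IV/AAD/digest placement, pick in-place vs SG I/O, allocate the
 * metadata buffer, then hand off to the enc/dec prep routine.
 */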
3117 static __rte_always_inline void *
3118 fill_fc_params(struct rte_crypto_op *cop,
3119 struct cpt_sess_misc *sess_misc,
3124 struct rte_crypto_sym_op *sym_op = cop->sym;
3127 uint32_t mc_hash_off;
3129 uint64_t d_offs, d_lens;
3130 void *prep_req = NULL;
3131 struct rte_mbuf *m_src, *m_dst;
3132 uint8_t cpt_op = sess_misc->cpt_op;
3133 uint8_t zsk_flag = sess_misc->zsk_flag;
3134 uint8_t aes_gcm = sess_misc->aes_gcm;
3135 uint16_t mac_len = sess_misc->mac_len;
3136 #ifdef CPT_ALWAYS_USE_SG_MODE
3137 uint8_t inplace = 0;
3139 uint8_t inplace = 1;
3141 fc_params_t fc_params;
3142 char src[SRC_IOV_SIZE];
3143 char dst[SRC_IOV_SIZE];
3145 struct cptvf_meta_info *cpt_m_info =
3146 (struct cptvf_meta_info *)(*mdata_ptr);
3148 if (likely(sess_misc->iv_length)) {
3149 flags |= VALID_IV_BUF;
3150 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
3151 uint8_t *, sess_misc->iv_offset);
3152 if (sess_misc->aes_ctr &&
3153 unlikely(sess_misc->iv_length != 16)) {
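			/* Expand the 12-byte IV into a 16-byte block by
			 * appending a big-endian counter of 1.
			 */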
3154 memcpy((uint8_t *)iv_buf,
3155 rte_crypto_op_ctod_offset(cop,
3156 uint8_t *, sess_misc->iv_offset), 12);
3157 iv_buf[3] = rte_cpu_to_be_32(0x1);
3158 fc_params.iv_buf = iv_buf;
3163 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3165 sess_misc->auth_iv_offset);
3166 if (zsk_flag == K_F9) {
3167 CPT_LOG_DP_ERR("Should not reach here for "
3170 if (zsk_flag != ZS_EA)
3173 m_src = sym_op->m_src;
3174 m_dst = sym_op->m_dst;
3181 d_offs = sym_op->aead.data.offset;
3182 d_lens = sym_op->aead.data.length;
3183 mc_hash_off = sym_op->aead.data.offset +
3184 sym_op->aead.data.length;
3186 aad_data = sym_op->aead.aad.data;
3187 aad_len = sess_misc->aad_length;
3188 if (likely((aad_data + aad_len) ==
3189 rte_pktmbuf_mtod_offset(m_src,
3191 sym_op->aead.data.offset))) {
3192 d_offs = (d_offs - aad_len) | (d_offs << 16);
3193 d_lens = (d_lens + aad_len) | (d_lens << 32);
3195 fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
3196 fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
3197 fc_params.aad_buf.size = aad_len;
3198 flags |= VALID_AAD_BUF;
3200 d_offs = d_offs << 16;
3201 d_lens = d_lens << 32;
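		/* The first 4 IV bytes are the GCM salt kept in the
		 * context; reprogram it when it changes and advance
		 * iv_buf past it.
		 */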
3204 salt = fc_params.iv_buf;
3205 if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3206 cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3207 sess_misc->salt = *(uint32_t *)salt;
3209 fc_params.iv_buf = salt + 4;
3210 if (likely(mac_len)) {
3211 struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
		/* HMAC immediately following the data is the best case */
3218 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
3220 (uint8_t *)sym_op->aead.digest.data)) {
3221 flags |= VALID_MAC_BUF;
3222 fc_params.mac_buf.size = sess_misc->mac_len;
3223 fc_params.mac_buf.vaddr =
3224 sym_op->aead.digest.data;
3225 fc_params.mac_buf.dma_addr =
3226 sym_op->aead.digest.phys_addr;
3231 d_offs = sym_op->cipher.data.offset;
3232 d_lens = sym_op->cipher.data.length;
3233 mc_hash_off = sym_op->cipher.data.offset +
3234 sym_op->cipher.data.length;
3235 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
3236 d_lens = (d_lens << 32) | sym_op->auth.data.length;
3238 if (mc_hash_off < (sym_op->auth.data.offset +
3239 sym_op->auth.data.length)){
3240 mc_hash_off = (sym_op->auth.data.offset +
3241 sym_op->auth.data.length);
		/* For GMAC, the salt must be updated just as in GCM */
3244 if (unlikely(sess_misc->is_gmac)) {
3246 salt = fc_params.iv_buf;
3247 if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3248 cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3249 sess_misc->salt = *(uint32_t *)salt;
3251 fc_params.iv_buf = salt + 4;
3253 if (likely(mac_len)) {
3256 m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			/* HMAC immediately following the data is the best case */
3261 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
3263 (uint8_t *)sym_op->auth.digest.data)) {
3264 flags |= VALID_MAC_BUF;
3265 fc_params.mac_buf.size =
3267 fc_params.mac_buf.vaddr =
3268 sym_op->auth.digest.data;
3269 fc_params.mac_buf.dma_addr =
3270 sym_op->auth.digest.phys_addr;
3275 fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
3276 fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
3278 if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
3281 if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf, processed in place and
3286 fc_params.dst_iov = fc_params.src_iov = (void *)src;
3288 if (unlikely(prepare_iov_from_pkt_inplace(m_src,
3291 CPT_LOG_DP_ERR("Prepare inplace src iov failed");
3297 /* Out of place processing */
3298 fc_params.src_iov = (void *)src;
3299 fc_params.dst_iov = (void *)dst;
		/* Store the source SG I/O list in the params for reuse */
3302 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
3303 CPT_LOG_DP_ERR("Prepare src iov failed");
3308 if (unlikely(m_dst != NULL)) {
			/* Grow m_dst to match the src length if needed */
3312 m_dst = sym_op->m_dst;
3313 pkt_len = rte_pktmbuf_pkt_len(m_dst);
3315 if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
3316 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
3317 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
3318 CPT_LOG_DP_ERR("Not enough space in "
3326 if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
3327 CPT_LOG_DP_ERR("Prepare dst iov failed for "
3332 fc_params.dst_iov = (void *)src;
3336 if (likely(flags & SINGLE_BUF_HEADTAILROOM))
3337 mdata = alloc_op_meta(m_src,
3338 &fc_params.meta_buf,
3339 cpt_m_info->cptvf_op_sb_mlen,
3340 cpt_m_info->cptvf_meta_pool);
3342 mdata = alloc_op_meta(NULL,
3343 &fc_params.meta_buf,
3344 cpt_m_info->cptvf_op_mlen,
3345 cpt_m_info->cptvf_meta_pool);
3347 if (unlikely(mdata == NULL)) {
3348 CPT_LOG_DP_ERR("Error allocating meta buffer for request");
3352 op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
3353 op[0] = (uintptr_t)mdata;
3354 op[1] = (uintptr_t)cop;
3355 op[2] = op[3] = 0; /* Used to indicate auth verify */
3356 space += 4 * sizeof(uint64_t);
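	/* The first four u64 words of mdata hold completion bookkeeping
	 * (mdata and cop pointers, auth-verify slots); the usable meta
	 * buffer starts after them.
	 */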
3358 fc_params.meta_buf.vaddr = (uint8_t *)op + space;
3359 fc_params.meta_buf.dma_addr += space;
3360 fc_params.meta_buf.size -= space;
3362 /* Finally prepare the instruction */
3363 if (cpt_op & CPT_OP_ENCODE)
3364 prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
3365 &fc_params, op, op_ret);
3367 prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
3368 &fc_params, op, op_ret);
3370 if (unlikely(!prep_req))
3371 free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
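/*
 * Compare the generated MAC against the expected digest (the op's
 * digest buffer, or the bytes trailing the auth region in m_src)
 * and set the op status accordingly.
 */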
3376 static __rte_always_inline void
3377 compl_auth_verify(struct rte_crypto_op *op,
3382 struct rte_crypto_sym_op *sym_op = op->sym;
3384 if (sym_op->auth.digest.data)
3385 mac = sym_op->auth.digest.data;
3387 mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
3389 sym_op->auth.data.length +
3390 sym_op->auth.data.offset);
3392 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3396 if (memcmp(mac, gen_mac, mac_len))
3397 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
3399 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3402 static __rte_always_inline int
3403 instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
3405 struct rte_crypto_sym_xform *chain;
3407 CPT_PMD_INIT_FUNC_TRACE();
3409 if (cpt_is_algo_supported(xform))
3414 switch (chain->type) {
3415 case RTE_CRYPTO_SYM_XFORM_AEAD:
3416 if (fill_sess_aead(chain, sess))
3419 case RTE_CRYPTO_SYM_XFORM_CIPHER:
3420 if (fill_sess_cipher(chain, sess))
3423 case RTE_CRYPTO_SYM_XFORM_AUTH:
3424 if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
3425 if (fill_sess_gmac(chain, sess))
3428 if (fill_sess_auth(chain, sess))
3433 CPT_LOG_DP_ERR("Invalid crypto xform type");
3436 chain = chain->next;
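/*
 * A KASUMI F9 input ends with DIRECTION || '1' || zero padding.
 * Scanning back from the end, the first set bit is the pad marker,
 * the bit before it is the direction, and everything earlier is the
 * message. E.g. for a buffer ending ... 0xC3 0x80 0x00, the 0x80 at
 * byte index n yields direction = 0xC3 & 0x1 and
 * length_in_bits = n * 8 - 1.
 */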
3445 static __rte_always_inline void
3446 find_kasumif9_direction_and_length(uint8_t *src,
3447 uint32_t counter_num_bytes,
3448 uint32_t *addr_length_in_bits,
3449 uint8_t *addr_direction)
3452 while (!found && counter_num_bytes > 0) {
3453 counter_num_bytes--;
3454 if (src[counter_num_bytes] == 0x00)
3456 if (src[counter_num_bytes] == 0x80) {
3457 *addr_direction = src[counter_num_bytes - 1] & 0x1;
3458 *addr_length_in_bits = counter_num_bytes * 8 - 1;
3462 uint8_t last_byte = src[counter_num_bytes];
3463 for (i = 0; i < 8 && found == 0; i++) {
3464 if (last_byte & (1 << i)) {
3465 *addr_direction = (last_byte >> (i+1))
3468 *addr_length_in_bits =
3469 counter_num_bytes * 8
3472 *addr_length_in_bits =
3473 counter_num_bytes * 8;
 * This handles all auth-only cases except AES_GMAC
3484 static __rte_always_inline void *
3485 fill_digest_params(struct rte_crypto_op *cop,
3486 struct cpt_sess_misc *sess,
3491 struct rte_crypto_sym_op *sym_op = cop->sym;
3495 uint32_t auth_range_off;
3497 uint64_t d_offs = 0, d_lens;
3498 void *prep_req = NULL;
3499 struct rte_mbuf *m_src, *m_dst;
3500 uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
3501 uint8_t zsk_flag = sess->zsk_flag;
3502 uint16_t mac_len = sess->mac_len;
3504 char src[SRC_IOV_SIZE];
	memset(&params, 0, sizeof(fc_params_t));
3507 struct cptvf_meta_info *cpt_m_info =
3508 (struct cptvf_meta_info *)(*mdata_ptr);
3510 m_src = sym_op->m_src;
	/* For digest-only ops, force a mempool allocation */
	mdata = alloc_op_meta(NULL, &params.meta_buf, cpt_m_info->cptvf_op_mlen,
3514 cpt_m_info->cptvf_meta_pool);
3515 if (mdata == NULL) {
3516 CPT_LOG_DP_ERR("Error allocating meta buffer for request");
3521 mphys = params.meta_buf.dma_addr;
3524 op[0] = (uintptr_t)mdata;
3525 op[1] = (uintptr_t)cop;
3526 op[2] = op[3] = 0; /* Used to indicate auth verify */
3527 space += 4 * sizeof(uint64_t);
3529 auth_range_off = sym_op->auth.data.offset;
3531 flags = VALID_MAC_BUF;
3532 params.src_iov = (void *)src;
3533 if (unlikely(zsk_flag)) {
		 * Since for ZUC, KASUMI and SNOW3G the offsets are in bits,
		 * we send pass-through even for the auth-only case,
3539 d_offs = auth_range_off;
3541 params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3542 uint8_t *, sess->auth_iv_offset);
3543 if (zsk_flag == K_F9) {
3544 uint32_t length_in_bits, num_bytes;
3545 uint8_t *src, direction = 0;
3546 uint32_t counter_num_bytes;
3548 memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
			 * This is KASUMI F9: take the direction from
3554 length_in_bits = cop->sym->auth.data.length;
3555 num_bytes = (length_in_bits >> 3);
3556 counter_num_bytes = num_bytes;
3557 src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
3558 find_kasumif9_direction_and_length(src,
3562 length_in_bits -= 64;
3563 cop->sym->auth.data.offset += 64;
3564 d_offs = cop->sym->auth.data.offset;
3565 auth_range_off = d_offs / 8;
3566 cop->sym->auth.data.length = length_in_bits;
			/* Store the direction bit at the end of the auth IV */
3569 iv_buf[8] = direction;
3570 params.auth_iv_buf = iv_buf;
3574 d_lens = sym_op->auth.data.length;
3576 params.ctx_buf.vaddr = SESS_PRIV(sess);
3577 params.ctx_buf.dma_addr = sess->ctx_dma_addr;
3579 if (auth_op == CPT_OP_AUTH_GENERATE) {
3580 if (sym_op->auth.digest.data) {
3582 * Digest to be generated
3583 * in separate buffer
3585 params.mac_buf.size =
3587 params.mac_buf.vaddr =
3588 sym_op->auth.digest.data;
3589 params.mac_buf.dma_addr =
3590 sym_op->auth.digest.phys_addr;
3592 uint32_t off = sym_op->auth.data.offset +
3593 sym_op->auth.data.length;
3594 int32_t dlen, space;
3596 m_dst = sym_op->m_dst ?
3597 sym_op->m_dst : sym_op->m_src;
3598 dlen = rte_pktmbuf_pkt_len(m_dst);
3600 space = off + mac_len - dlen;
3602 if (!rte_pktmbuf_append(m_dst, space)) {
3603 CPT_LOG_DP_ERR("Failed to extend "
3604 "mbuf by %uB", space);
3608 params.mac_buf.vaddr =
3609 rte_pktmbuf_mtod_offset(m_dst, void *, off);
3610 params.mac_buf.dma_addr =
3611 rte_pktmbuf_mtophys_offset(m_dst, off);
3612 params.mac_buf.size = mac_len;
3615 /* Need space for storing generated mac */
3616 params.mac_buf.vaddr = (uint8_t *)mdata + space;
3617 params.mac_buf.dma_addr = mphys + space;
3618 params.mac_buf.size = mac_len;
3619 space += RTE_ALIGN_CEIL(mac_len, 8);
3620 op[2] = (uintptr_t)params.mac_buf.vaddr;
3624 params.meta_buf.vaddr = (uint8_t *)mdata + space;
3625 params.meta_buf.dma_addr = mphys + space;
3626 params.meta_buf.size -= space;
3628 /* Out of place processing */
3629 params.src_iov = (void *)src;
	/* Store the source SG I/O list in the params for reuse */
3632 if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
3633 CPT_LOG_DP_ERR("Prepare src iov failed");
3638 prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
				       &params, op, op_ret);
3643 if (unlikely(!prep_req))
3644 free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
3648 #endif /*_CPT_UCODE_H_ */