/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#ifndef _CPT_UCODE_H_
#define _CPT_UCODE_H_

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

/*
 * This file defines functions that are interfaces to microcode spec.
 */
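
/*
 * ZUC initialization (D) constants; copied verbatim into
 * zs_ctx.zuc_const when a ZUC EEA3/EIA3 key is programmed
 * (see cpt_fc_ciph_set_key_zuc_eea3() and cpt_fc_auth_set_key()).
 */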
static uint8_t zuc_d[32] = {
    0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
    0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
    0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
    0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
static __rte_always_inline int
cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
{
    /*
     * Microcode only supports the following combination.
     * Encryption followed by authentication
     * Authentication followed by decryption
     */
    if (xform->next) {
        if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
            (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
            (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
            /* Unsupported as of now by microcode */
            CPT_LOG_DP_ERR("Unsupported combination");
            return -1;
        }
        if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
            (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
            (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
            /* For GMAC auth there is no cipher operation */
            if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
                xform->next->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
                /* Unsupported as of now by microcode */
                CPT_LOG_DP_ERR("Unsupported combination");
                return -1;
            }
        }
    }
    return 0;
}
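
/*
 * Convert a 16-byte SNOW 3G confidentiality key (CK) into the four
 * 32-bit words the microcode expects: each word is packed big-endian
 * and the word order is reversed (keyx[3] holds CK[0..3]).
 */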
static __rte_always_inline void
gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
{
    int i, base;

    for (i = 0; i < 4; i++) {
        base = 4 * i;
        keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
                      (ck[base + 2] << 8) | (ck[base + 3]);
        keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
    }
}
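
/*
 * Update only the 4-byte salt stored as the context IV prefix of an
 * already configured AES-GCM context, so the salt can be refreshed
 * per operation without reprogramming the key (see fill_fc_params()).
 */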
static __rte_always_inline void
cpt_fc_salt_update(void *ctx, uint8_t *salt)
{
    struct cpt_ctx *cpt_ctx = ctx;

    memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
static __rte_always_inline int
cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
                         uint16_t key_len)
{
    int fc_type = 0;

    switch (type) {
    case AES_CBC:
    case AES_ECB:
    case AES_CFB:
    case AES_CTR:
    case AES_GCM:
        if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
            return -1;
        fc_type = FC_GEN;
        break;
    case AES_XTS:
        key_len = key_len / 2;
        if (unlikely(key_len == CPT_BYTE_24)) {
            CPT_LOG_DP_ERR("Invalid AES key len for XTS");
            return -1;
        }
        if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
            return -1;
        fc_type = FC_GEN;
        break;
    case ZUC_EEA3:
    case SNOW3G_UEA2:
        if (unlikely(key_len != 16))
            return -1;
        /* No support for AEAD yet */
        if (unlikely(cpt_ctx->hash_type))
            return -1;
        fc_type = ZUC_SNOW3G;
        break;
    case KASUMI_F8_CBC:
    case KASUMI_F8_ECB:
        if (unlikely(key_len != 16))
            return -1;
        /* No support for AEAD yet */
        if (unlikely(cpt_ctx->hash_type))
            return -1;
        fc_type = KASUMI;
        break;
    default:
        return -1;
    }

    return fc_type;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
    cpt_ctx->enc_cipher = 0;
    CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
{
    mc_aes_type_t aes_key_type = 0;

    switch (key_len) {
    case CPT_BYTE_16:
        aes_key_type = AES_128_BIT;
        break;
    case CPT_BYTE_24:
        aes_key_type = AES_192_BIT;
        break;
    case CPT_BYTE_32:
        aes_key_type = AES_256_BIT;
        break;
    default:
        /* This should not happen */
        CPT_LOG_DP_ERR("Invalid AES key len");
        return;
    }
    CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
                                uint16_t key_len)
{
    uint32_t keyx[4];

    gen_key_snow3g(key, keyx);
    memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
    cpt_ctx->fc_type = ZUC_SNOW3G;
    cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
                             uint16_t key_len)
{
    memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
    memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
    cpt_ctx->fc_type = ZUC_SNOW3G;
    cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
                                  uint16_t key_len)
{
    memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
    cpt_ctx->zsk_flags = 0;
    cpt_ctx->fc_type = KASUMI;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
                                  uint16_t key_len)
{
    memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
    cpt_ctx->zsk_flags = 0;
    cpt_ctx->fc_type = KASUMI;
}
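
/*
 * Program a cipher key into the CPT context. The key is validated
 * first; FC_GEN ciphers are configured through the fetch context
 * (enc_ctrl is swapped to CPU byte order, updated, then swapped back),
 * while ZUC/SNOW3G and Kasumi use their dedicated context layouts via
 * the helpers above.
 */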
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
                    uint16_t key_len, uint8_t *salt)
{
    struct cpt_ctx *cpt_ctx = ctx;
    mc_fc_context_t *fctx = &cpt_ctx->fctx;
    uint64_t *ctrl_flags = NULL;
    int fc_type;

    /* Validate key before proceeding */
    fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
    if (unlikely(fc_type == -1))
        return -1;

    if (fc_type == FC_GEN) {
        cpt_ctx->fc_type = FC_GEN;
        ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
        *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
        /*
         * We need to always say IV is from DPTR as user can
         * sometimes override IV per operation.
         */
        CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
    }

    switch (type) {
    case PASSTHROUGH:
        cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
        break;
    case DES3_CBC:
        /* CPT performs DES using 3DES with the 8B DES-key
         * replicated 2 more times to match the 24B 3DES-key.
         * E.g. if the original key is "0x0a 0x0b", the new key
         * is "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
         */
        if (key_len == 8) {
            /* Skipping the first 8B as it will be copied
             * in the regular code flow
             */
            memcpy(fctx->enc.encr_key+key_len, key, key_len);
            memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
        }
        break;
    case DES3_ECB:
        /* For DES3_ECB IV need to be from CTX. */
        CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
        break;
    case AES_CBC:
    case AES_ECB:
    case AES_CFB:
    case AES_CTR:
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
        break;
    case AES_GCM:
        /* Even though iv source is from dptr,
         * aes_gcm salt is taken from ctx
         */
        if (salt) {
            memcpy(fctx->enc.encr_iv, salt, 4);
            /* Assuming it was just salt update
             * and nothing else
             */
            if (!key)
                goto success;
        }
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
        break;
    case AES_XTS:
        key_len = key_len / 2;
        cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

        /* Copy key2 for XTS into ipad */
        memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
        memcpy(fctx->hmac.ipad, &key[key_len], key_len);
        break;
    case SNOW3G_UEA2:
        cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
        goto success;
    case ZUC_EEA3:
        cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
        goto success;
    case KASUMI_F8_ECB:
        cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
        goto success;
    case KASUMI_F8_CBC:
        cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
        goto success;
    default:
        break;
    }

    /* Only for FC_GEN case */

    /* For GMAC auth, cipher must be NULL */
    if (cpt_ctx->hash_type != GMAC_TYPE)
        CPT_P_ENC_CTRL(fctx).enc_cipher = type;

    memcpy(fctx->enc.encr_key, key, key_len);

    *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);

success:
    cpt_ctx->enc_cipher = type;

    return 0;
}
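
/*
 * Scatter-gather component helpers. Each sg_comp_t packs four
 * (length, pointer) slots: entry i lives in list[i >> 2], slot i % 4.
 * Lengths and addresses are stored big-endian, as the microcode reads
 * them; the helpers return the next free entry index.
 */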
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
             uint32_t i,
             phys_addr_t dma_addr,
             uint32_t size)
{
    sg_comp_t *to = &list[i>>2];

    to->u.s.len[i%4] = rte_cpu_to_be_16(size);
    to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);

    i++;
    return i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
                      uint32_t i,
                      buf_ptr_t *from)
{
    sg_comp_t *to = &list[i>>2];

    to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
    to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);

    i++;
    return i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
                          uint32_t i,
                          buf_ptr_t *from,
                          uint32_t *psize)
{
    sg_comp_t *to = &list[i >> 2];
    uint32_t size = *psize;
    uint32_t e_len;

    e_len = (size > from->size) ? from->size : size;
    to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
    to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
    *psize -= e_len;
    i++;
    return i;
}
/*
 * This fills the MC expected SGIO list
 * from IOV given by user.
 */
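/*
 * When extra_buf is non-NULL it is spliced in extra_offset bytes into
 * the IOV data, splitting the gather component that spans that offset;
 * *psize is expected to already include extra_buf->size.
 */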
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
                      uint32_t i,
                      iov_ptr_t *from, uint32_t from_offset,
                      uint32_t *psize, buf_ptr_t *extra_buf,
                      uint32_t extra_offset)
{
    int32_t j;
    uint32_t extra_len = extra_buf ? extra_buf->size : 0;
    uint32_t size = *psize - extra_len;
    buf_ptr_t *bufs = from->bufs;

    for (j = 0; (j < from->buf_cnt) && size; j++) {
        phys_addr_t e_dma_addr;
        uint32_t e_len;
        sg_comp_t *to = &list[i >> 2];

        if (unlikely(from_offset)) {
            if (from_offset >= bufs[j].size) {
                from_offset -= bufs[j].size;
                continue;
            }
            e_dma_addr = bufs[j].dma_addr + from_offset;
            e_len = (size > (bufs[j].size - from_offset)) ?
                    (bufs[j].size - from_offset) : size;
            from_offset = 0;
        } else {
            e_dma_addr = bufs[j].dma_addr;
            e_len = (size > bufs[j].size) ?
                    bufs[j].size : size;
        }

        to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
        to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

        if (extra_len && (e_len >= extra_offset)) {
            /* Break the data at given offset */
            uint32_t next_len = e_len - extra_offset;
            phys_addr_t next_dma = e_dma_addr + extra_offset;

            e_len = extra_offset;
            to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);

            /* Insert extra data ptr */
            i++;
            to = &list[i >> 2];
            to->u.s.len[i % 4] =
                rte_cpu_to_be_16(extra_buf->size);
            to->ptr[i % 4] =
                rte_cpu_to_be_64(extra_buf->dma_addr);

            /* size already decremented by extra len */

            /* insert the rest of the data */
            i++;
            to = &list[i >> 2];
            to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
            to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);

            extra_len = 0;
        } else {
            size -= e_len;
        }

        if (extra_offset)
            extra_offset -= size;
        i++;
    }

    *psize = size;
    return i;
}
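
/*
 * Build a CPT FC instruction for encrypt (+ optional digest) ops.
 * Carves the completion code, result word and cpt_request_info out of
 * the caller's meta buffer, then either uses direct mode (single
 * in-place buffer with enough head/tailroom for the offset control
 * word and IV) or builds DPTR/RPTR gather and scatter lists.
 */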
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags,
                  uint64_t d_offs,
                  uint64_t d_lens,
                  fc_params_t *fc_params,
                  void *op,
                  void **prep_req)
{
    uint32_t iv_offset = 0;
    int32_t inputlen, outputlen, enc_dlen, auth_dlen;
    struct cpt_ctx *cpt_ctx;
    uint32_t cipher_type, hash_type;
    uint32_t mac_len, size;
    uint8_t iv_len = 16;
    struct cpt_request_info *req;
    buf_ptr_t *meta_p, *aad_buf = NULL;
    uint32_t encr_offset, auth_offset;
    uint32_t encr_data_len, auth_data_len, aad_len = 0;
    uint32_t passthrough_len = 0;
    void *m_vaddr, *offset_vaddr;
    uint64_t m_dma, offset_dma, ctx_dma;
    vq_cmd_word0_t vq_cmd_w0;
    vq_cmd_word3_t vq_cmd_w3;
    void *c_vaddr;
    uint64_t c_dma;
    int32_t m_size;
    opcode_info_t opcode;

    meta_p = &fc_params->meta_buf;
    m_vaddr = meta_p->vaddr;
    m_dma = meta_p->dma_addr;
    m_size = meta_p->size;

    encr_offset = ENCR_OFFSET(d_offs);
    auth_offset = AUTH_OFFSET(d_offs);
    encr_data_len = ENCR_DLEN(d_lens);
    auth_data_len = AUTH_DLEN(d_lens);
    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * We don't support both AAD
         * and auth data separately
         */
        auth_data_len = 0;
        auth_offset = 0;
        aad_len = fc_params->aad_buf.size;
        aad_buf = &fc_params->aad_buf;
    }
    cpt_ctx = fc_params->ctx_buf.vaddr;
    cipher_type = cpt_ctx->enc_cipher;
    hash_type = cpt_ctx->hash_type;
    mac_len = cpt_ctx->mac_len;

    /*
     * Save initial space that followed app data for completion code &
     * alternate completion code to fall in same cache line as app data
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
           (uint8_t *)m_vaddr;

    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;
    m_size -= size;

    /* start cpt request info struct at 8 byte boundary */
    size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
           (uint8_t *)m_vaddr;

    req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

    size += sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;
    m_size -= size;

    if (hash_type == GMAC_TYPE)
        encr_data_len = 0;

    if (unlikely(!(flags & VALID_IV_BUF))) {
        iv_len = 0;
        iv_offset = ENCR_IV_OFFSET(d_offs);
    }

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * When AAD is given, data above encr_offset is pass through
         * Since AAD is given as separate pointer and not as offset,
         * this is a special case as we need to fragment input data
         * into passthrough + encr_data and then insert AAD in between.
         */
        if (hash_type != GMAC_TYPE) {
            passthrough_len = encr_offset;
            auth_offset = passthrough_len + iv_len;
            encr_offset = passthrough_len + aad_len + iv_len;
            auth_data_len = aad_len + encr_data_len;
        } else {
            passthrough_len = 16 + aad_len;
            auth_offset = passthrough_len + iv_len;
            auth_data_len = aad_len;
        }
    } else {
        encr_offset += iv_len;
        auth_offset += iv_len;
    }

    /* Encryption */
    opcode.s.major = CPT_MAJOR_OP_FC;
    opcode.s.minor = 0;

    auth_dlen = auth_offset + auth_data_len;
    enc_dlen = encr_data_len + encr_offset;
    if (unlikely(encr_data_len & 0xf)) {
        if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
            enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
        else if (likely((cipher_type == AES_CBC) ||
                        (cipher_type == AES_ECB)))
            enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
    }

    if (unlikely(hash_type == GMAC_TYPE)) {
        encr_offset = auth_dlen;
        enc_dlen = 0;
    }

    if (unlikely(auth_dlen > enc_dlen)) {
        inputlen = auth_dlen;
        outputlen = auth_dlen + mac_len;
    } else {
        inputlen = enc_dlen;
        outputlen = enc_dlen + mac_len;
    }

    /* GP op header */
    vq_cmd_w0.u64 = 0;
    vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
    vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
    /*
     * In 83XX the IV & offset control word cannot be part of the
     * instruction and must be placed in the data buffer, so do the
     * direct mode processing only when head room is available.
     */
    if (likely((flags & SINGLE_BUF_INPLACE) &&
               (flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = fc_params->bufs[0].vaddr;
        uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
        /*
         * This flag indicates that there are 24 bytes of head room
         * and 8 bytes of tail room available, so that we get to do
         * DIRECT MODE with limitation
         */

        offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

        /* DPTR */
        req->ist.ei1 = offset_dma;
        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;
        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
                                            + outputlen - iv_len);

        vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);

        vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
                                          + OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                             ((uint64_t)iv_offset << 8) |
                             ((uint64_t)auth_offset));
    } else {
        uint32_t i, g_size_bytes, s_size_bytes;
        uint64_t dptr_dma, rptr_dma;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;

        /* This falls under strict SG mode */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;
        size = OFF_CTRL_LEN + iv_len;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;
        m_size -= size;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
                                          + OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                             ((uint64_t)iv_offset << 8) |
                             ((uint64_t)auth_offset));

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO Add error check if space will be sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word that includes iv */
        i = fill_sg_comp(gather_comp, i, offset_dma,
                         OFF_CTRL_LEN + iv_len);

        /* Add input data */
        size = inputlen - iv_len;
        if (likely(size)) {
            uint32_t aad_offset = aad_len ? passthrough_len : 0;

            if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                i = fill_sg_comp_from_buf_min(gather_comp, i,
                                              fc_params->bufs,
                                              &size);
            } else {
                i = fill_sg_comp_from_iov(gather_comp, i,
                                          fc_params->src_iov,
                                          0, &size,
                                          aad_buf, aad_offset);
            }

            if (unlikely(size)) {
                CPT_LOG_DP_ERR("Insufficient buffer space,"
                               " size %d needed", size);
                return ERR_BAD_INPUT_ARG;
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter list
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* Add IV */
        if (likely(iv_len)) {
            i = fill_sg_comp(scatter_comp, i,
                             offset_dma + OFF_CTRL_LEN,
                             iv_len);
        }

        /* output data or output data + digest*/
        if (unlikely(flags & VALID_MAC_BUF)) {
            size = outputlen - iv_len - mac_len;
            if (size) {
                uint32_t aad_offset =
                    aad_len ? passthrough_len : 0;

                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            scatter_comp,
                            i,
                            fc_params->bufs,
                            &size);
                } else {
                    i = fill_sg_comp_from_iov(scatter_comp,
                            i,
                            fc_params->dst_iov,
                            0,
                            &size,
                            aad_buf,
                            aad_offset);
                }
                if (unlikely(size))
                    return ERR_BAD_INPUT_ARG;
            }
            /* mac data */
            if (mac_len) {
                i = fill_sg_comp_from_buf(scatter_comp, i,
                                          &fc_params->mac_buf);
            }
        } else {
            /* Output including mac */
            size = outputlen - iv_len;
            if (likely(size)) {
                uint32_t aad_offset =
                    aad_len ? passthrough_len : 0;

                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            scatter_comp,
                            i,
                            fc_params->bufs,
                            &size);
                } else {
                    i = fill_sg_comp_from_iov(scatter_comp,
                            i,
                            fc_params->dst_iov,
                            0,
                            &size,
                            aad_buf,
                            aad_offset);
                }

                if (unlikely(size)) {
                    CPT_LOG_DP_ERR("Insufficient buffer"
                                   " space, size %d needed",
                                   size);
                    return ERR_BAD_INPUT_ARG;
                }
            }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;
        m_size -= size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* First 16-bit swap then 64-bit swap */
    /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
     * to eliminate all the swapping
     */
    vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

    ctx_dma = fc_params->ctx_buf.dma_addr +
              offsetof(struct cpt_ctx, fctx);

    /* vq command w3 */
    vq_cmd_w3.u64 = 0;
    vq_cmd_w3.s.grp = 0;
    vq_cmd_w3.s.cptr = ctx_dma;

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;
    req->ist.ei3 = vq_cmd_w3.u64;

    req->op = op;

    *prep_req = req;
    return 0;
}
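
/*
 * Decrypt counterpart of cpt_enc_hmac_prep(): same meta buffer layout
 * and direct/SG mode selection, but the MAC to verify is part of the
 * input (inputlen includes mac_len) and is not part of the output.
 */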
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags,
                  uint64_t d_offs,
                  uint64_t d_lens,
                  fc_params_t *fc_params,
                  void *op,
                  void **prep_req)
{
    uint32_t iv_offset = 0, size;
    int32_t inputlen, outputlen, enc_dlen, auth_dlen;
    struct cpt_ctx *cpt_ctx;
    int32_t hash_type, mac_len, m_size;
    uint8_t iv_len = 16;
    struct cpt_request_info *req;
    buf_ptr_t *meta_p, *aad_buf = NULL;
    uint32_t encr_offset, auth_offset;
    uint32_t encr_data_len, auth_data_len, aad_len = 0;
    uint32_t passthrough_len = 0;
    void *m_vaddr, *offset_vaddr;
    uint64_t m_dma, offset_dma, ctx_dma;
    opcode_info_t opcode;
    vq_cmd_word0_t vq_cmd_w0;
    vq_cmd_word3_t vq_cmd_w3;
    void *c_vaddr;
    uint64_t c_dma;

    meta_p = &fc_params->meta_buf;
    m_vaddr = meta_p->vaddr;
    m_dma = meta_p->dma_addr;
    m_size = meta_p->size;

    encr_offset = ENCR_OFFSET(d_offs);
    auth_offset = AUTH_OFFSET(d_offs);
    encr_data_len = ENCR_DLEN(d_lens);
    auth_data_len = AUTH_DLEN(d_lens);

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * We don't support both AAD
         * and auth data separately
         */
        auth_data_len = 0;
        auth_offset = 0;
        aad_len = fc_params->aad_buf.size;
        aad_buf = &fc_params->aad_buf;
    }

    cpt_ctx = fc_params->ctx_buf.vaddr;
    hash_type = cpt_ctx->hash_type;
    mac_len = cpt_ctx->mac_len;

    if (hash_type == GMAC_TYPE)
        encr_data_len = 0;

    if (unlikely(!(flags & VALID_IV_BUF))) {
        iv_len = 0;
        iv_offset = ENCR_IV_OFFSET(d_offs);
    }

    if (unlikely(flags & VALID_AAD_BUF)) {
        /*
         * When AAD is given, data above encr_offset is pass through
         * Since AAD is given as separate pointer and not as offset,
         * this is a special case as we need to fragment input data
         * into passthrough + encr_data and then insert AAD in between.
         */
        if (hash_type != GMAC_TYPE) {
            passthrough_len = encr_offset;
            auth_offset = passthrough_len + iv_len;
            encr_offset = passthrough_len + aad_len + iv_len;
            auth_data_len = aad_len + encr_data_len;
        } else {
            passthrough_len = 16 + aad_len;
            auth_offset = passthrough_len + iv_len;
            auth_data_len = aad_len;
        }
    } else {
        encr_offset += iv_len;
        auth_offset += iv_len;
    }

    /*
     * Save initial space that followed app data for completion code &
     * alternate completion code to fall in same cache line as app data
     */
    m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
    m_dma += COMPLETION_CODE_SIZE;
    size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
           (uint8_t *)m_vaddr;
    c_vaddr = (uint8_t *)m_vaddr + size;
    c_dma = m_dma + size;
    size += sizeof(cpt_res_s_t);

    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;
    m_size -= size;

    /* start cpt request info structure at 8 byte alignment */
    size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
           (uint8_t *)m_vaddr;

    req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

    size += sizeof(struct cpt_request_info);
    m_vaddr = (uint8_t *)m_vaddr + size;
    m_dma += size;
    m_size -= size;

    /* Decryption */
    opcode.s.major = CPT_MAJOR_OP_FC;
    opcode.s.minor = 1;

    enc_dlen = encr_offset + encr_data_len;
    auth_dlen = auth_offset + auth_data_len;

    if (auth_dlen > enc_dlen) {
        inputlen = auth_dlen + mac_len;
        outputlen = auth_dlen;
    } else {
        inputlen = enc_dlen + mac_len;
        outputlen = enc_dlen;
    }

    if (hash_type == GMAC_TYPE)
        encr_offset = inputlen;

    vq_cmd_w0.u64 = 0;
    vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
    vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);

    /*
     * In 83XX the IV & offset control word cannot be part of the
     * instruction and must be placed in the data buffer, so do the
     * direct mode processing only when head room is available.
     */
    if (likely((flags & SINGLE_BUF_INPLACE) &&
               (flags & SINGLE_BUF_HEADTAILROOM))) {
        void *dm_vaddr = fc_params->bufs[0].vaddr;
        uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
        /*
         * This flag indicates that there are 24 bytes of head room
         * and 8 bytes of tail room available, so that we get to do
         * DIRECT MODE with limitation
         */

        offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
        offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
        req->ist.ei1 = offset_dma;

        /* RPTR should just exclude offset control word */
        req->ist.ei2 = dm_dma_addr - iv_len;

        req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
                                            outputlen - iv_len);
        /* since this is decryption,
         * don't touch the content of
         * alternate ccode space as it contains
         * hmac.
         */

        vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);

        vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                          OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                             ((uint64_t)iv_offset << 8) |
                             ((uint64_t)auth_offset));
    } else {
        uint64_t dptr_dma, rptr_dma;
        uint32_t g_size_bytes, s_size_bytes;
        sg_comp_t *gather_comp;
        sg_comp_t *scatter_comp;
        uint8_t *in_buffer;
        uint32_t i = 0;

        /* This falls under strict SG mode */
        offset_vaddr = m_vaddr;
        offset_dma = m_dma;
        size = OFF_CTRL_LEN + iv_len;

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;
        m_size -= size;

        opcode.s.major |= CPT_DMA_MODE;

        vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

        if (likely(iv_len)) {
            uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                          OFF_CTRL_LEN);
            uint64_t *src = fc_params->iv_buf;
            dest[0] = src[0];
            dest[1] = src[1];
        }

        *(uint64_t *)offset_vaddr =
            rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
                             ((uint64_t)iv_offset << 8) |
                             ((uint64_t)auth_offset));

        /* DPTR has SG list */
        in_buffer = m_vaddr;
        dptr_dma = m_dma;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO Add error check if space will be sufficient */
        gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

        /*
         * Input Gather List
         */
        i = 0;

        /* Offset control word that includes iv */
        i = fill_sg_comp(gather_comp, i, offset_dma,
                         OFF_CTRL_LEN + iv_len);

        /* Add input data */
        if (flags & VALID_MAC_BUF) {
            size = inputlen - iv_len - mac_len;
            if (size) {
                /* input data only */
                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            gather_comp, i,
                            fc_params->bufs,
                            &size);
                } else {
                    uint32_t aad_offset = aad_len ?
                        passthrough_len : 0;

                    i = fill_sg_comp_from_iov(gather_comp,
                            i,
                            fc_params->src_iov,
                            0, &size,
                            aad_buf,
                            aad_offset);
                }
                if (unlikely(size))
                    return ERR_BAD_INPUT_ARG;
            }

            /* mac data */
            if (mac_len) {
                i = fill_sg_comp_from_buf(gather_comp, i,
                                          &fc_params->mac_buf);
            }
        } else {
            /* input data + mac */
            size = inputlen - iv_len;
            if (size) {
                if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                    i = fill_sg_comp_from_buf_min(
                            gather_comp, i,
                            fc_params->bufs,
                            &size);
                } else {
                    uint32_t aad_offset = aad_len ?
                        passthrough_len : 0;

                    if (!fc_params->src_iov)
                        return ERR_BAD_INPUT_ARG;

                    i = fill_sg_comp_from_iov(
                            gather_comp, i,
                            fc_params->src_iov,
                            0, &size,
                            aad_buf,
                            aad_offset);
                }

                if (unlikely(size))
                    return ERR_BAD_INPUT_ARG;
            }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp =
            (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

        /* Add iv */
        if (likely(iv_len)) {
            i = fill_sg_comp(scatter_comp, i,
                             offset_dma + OFF_CTRL_LEN,
                             iv_len);
        }

        /* Add output data */
        size = outputlen - iv_len;
        if (size) {
            if (unlikely(flags & SINGLE_BUF_INPLACE)) {
                /* handle single buffer here */
                i = fill_sg_comp_from_buf_min(scatter_comp, i,
                                              fc_params->bufs,
                                              &size);
            } else {
                uint32_t aad_offset = aad_len ?
                    passthrough_len : 0;

                if (!fc_params->dst_iov)
                    return ERR_BAD_INPUT_ARG;

                i = fill_sg_comp_from_iov(scatter_comp, i,
                                          fc_params->dst_iov, 0,
                                          &size, aad_buf,
                                          aad_offset);
            }

            if (unlikely(size))
                return ERR_BAD_INPUT_ARG;
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

        size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

        /* This is DPTR len in case of SG mode */
        vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

        m_vaddr = (uint8_t *)m_vaddr + size;
        m_dma += size;
        m_size -= size;

        /* cpt alternate completion address saved earlier */
        req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
        rptr_dma = c_dma - 8;
        size += COMPLETION_CODE_SIZE;

        req->ist.ei1 = dptr_dma;
        req->ist.ei2 = rptr_dma;
    }

    /* First 16-bit swap then 64-bit swap */
    /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
     * to eliminate all the swapping
     */
    vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

    ctx_dma = fc_params->ctx_buf.dma_addr +
              offsetof(struct cpt_ctx, fctx);

    /* vq command w3 */
    vq_cmd_w3.u64 = 0;
    vq_cmd_w3.s.grp = 0;
    vq_cmd_w3.s.cptr = ctx_dma;

    /* 16 byte aligned cpt res address */
    req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
    *req->completion_addr = COMPLETION_CODE_INIT;
    req->comp_baddr = c_dma;

    /* Fill microcode part of instruction */
    req->ist.ei0 = vq_cmd_w0.u64;
    req->ist.ei3 = vq_cmd_w3.u64;

    req->op = op;

    *prep_req = req;
    return 0;
}
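
/*
 * Dispatch a decrypt request to the right prepare routine based on the
 * context fc_type; only FC_GEN is handled here. On failure the error
 * code is returned through ret_val and NULL is returned.
 */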
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
                     uint64_t d_offs,
                     uint64_t d_lens,
                     fc_params_t *fc_params,
                     void *op, int *ret_val)
{
    struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
    uint8_t fc_type;
    void *prep_req = NULL;
    int ret;

    fc_type = ctx->fc_type;

    if (likely(fc_type == FC_GEN)) {
        ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
                                fc_params, op, &prep_req);
    } else {
        /*
         * For AUTH_ONLY case,
         * MC only supports digest generation and verification
         * should be done in software by memcmp()
         */
    }

    if (unlikely(!prep_req))
        *ret_val = ret;
    return prep_req;
}
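
/*
 * Encrypt-side dispatcher, mirroring cpt_fc_dec_hmac_prep().
 */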
static __rte_always_inline void *__hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                     fc_params_t *fc_params, void *op, int *ret_val)
{
    struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
    uint8_t fc_type;
    void *prep_req = NULL;
    int ret;

    fc_type = ctx->fc_type;

    /* Common API for the rest of the ops */
    if (likely(fc_type == FC_GEN)) {
        ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
                                fc_params, op, &prep_req);
    }

    if (unlikely(!prep_req))
        *ret_val = ret;
    return prep_req;
}
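
/*
 * Program an authentication key. ZUC/SNOW3G/Kasumi MACs use their own
 * context layouts (zsk_flags = 1, fixed 4-byte MAC). For the HMAC path
 * the raw key is stored in cpt_ctx->auth_key and in fctx->hmac.opad,
 * with auth_input_type set to indicate an unhashed key.
 */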
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
                    uint16_t key_len, uint16_t mac_len)
{
    struct cpt_ctx *cpt_ctx = ctx;
    mc_fc_context_t *fctx = &cpt_ctx->fctx;
    uint64_t *ctrl_flags = NULL;

    if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
        uint32_t keyx[4];

        /* No support for AEAD yet */
        if (cpt_ctx->enc_cipher)
            return -1;
        /* For ZUC/SNOW3G/Kasumi */
        switch (type) {
        case SNOW3G_UIA2:
            cpt_ctx->snow3g = 1;
            gen_key_snow3g(key, keyx);
            memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
            cpt_ctx->fc_type = ZUC_SNOW3G;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case ZUC_EIA3:
            cpt_ctx->snow3g = 0;
            memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
            memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
            cpt_ctx->fc_type = ZUC_SNOW3G;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case KASUMI_F9_ECB:
            /* Kasumi ECB mode */
            cpt_ctx->k_ecb = 1;
            memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
            cpt_ctx->fc_type = KASUMI;
            cpt_ctx->zsk_flags = 0x1;
            break;
        case KASUMI_F9_CBC:
            memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
            cpt_ctx->fc_type = KASUMI;
            cpt_ctx->zsk_flags = 0x1;
            break;
        default:
            return -1;
        }
        cpt_ctx->mac_len = 4;
        cpt_ctx->hash_type = type;
        return 0;
    }

    if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
        if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
            cpt_ctx->fc_type = HASH_HMAC;
    }

    ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
    *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);

    /* For GMAC auth, cipher must be NULL */
    if (type == GMAC_TYPE)
        CPT_P_ENC_CTRL(fctx).enc_cipher = 0;

    CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
    CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;

    if (key_len) {
        memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
        memcpy(cpt_ctx->auth_key, key, key_len);
        cpt_ctx->auth_key_len = key_len;
        memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
        memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
        memcpy(fctx->hmac.opad, key, key_len);
        CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
    }
    *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
    return 0;
}
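
/*
 * Translate an rte_crypto AEAD xform (AES-GCM only) into the CPT
 * session: maps encrypt/decrypt to cipher+auth op pairs and programs
 * both the cipher key and the digest length.
 */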
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
               struct cpt_sess_misc *sess)
{
    struct rte_crypto_aead_xform *aead_form;
    cipher_type_t enc_type = 0; /* NULL Cipher type */
    auth_type_t auth_type = 0; /* NULL Auth type */
    uint32_t cipher_key_len = 0;
    uint8_t zsk_flag = 0, aes_gcm = 0;
    void *ctx;

    aead_form = &xform->aead;

    if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
        aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
        sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
        sess->cpt_op |= CPT_OP_AUTH_GENERATE;
    } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
               aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
        sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
        sess->cpt_op |= CPT_OP_AUTH_VERIFY;
    } else {
        CPT_LOG_DP_ERR("Unknown cipher operation\n");
        return -1;
    }
    switch (aead_form->algo) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        enc_type = AES_GCM;
        cipher_key_len = 16;
        aes_gcm = 1;
        break;
    case RTE_CRYPTO_AEAD_AES_CCM:
        CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
                       aead_form->algo);
        return -1;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
                       aead_form->algo);
        return -1;
    }
    if (aead_form->key.length < cipher_key_len) {
        CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
                       (unsigned long)aead_form->key.length);
        return -1;
    }
    sess->zsk_flag = zsk_flag;
    sess->aes_gcm = aes_gcm;
    sess->mac_len = aead_form->digest_length;
    sess->iv_offset = aead_form->iv.offset;
    sess->iv_length = aead_form->iv.length;
    sess->aad_length = aead_form->aad_length;
    ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));

    cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
                        aead_form->key.length, NULL);

    cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);

    return 0;
}
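
/*
 * Translate a cipher-only xform into the CPT session. Note that
 * DES-CBC is mapped onto DES3_CBC: the hardware runs DES as 3DES with
 * a replicated key (see cpt_fc_ciph_set_key()).
 */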
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
                 struct cpt_sess_misc *sess)
{
    struct rte_crypto_cipher_xform *c_form;
    cipher_type_t enc_type = 0; /* NULL Cipher type */
    uint32_t cipher_key_len = 0;
    uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;

    if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        return -1;

    c_form = &xform->cipher;

    if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
        sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
    else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
        sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
    else {
        CPT_LOG_DP_ERR("Unknown cipher operation\n");
        return -1;
    }

    switch (c_form->algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        enc_type = AES_CBC;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        enc_type = DES3_CBC;
        cipher_key_len = 24;
        break;
    case RTE_CRYPTO_CIPHER_DES_CBC:
        /* DES is implemented using 3DES in hardware */
        enc_type = DES3_CBC;
        cipher_key_len = 8;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        enc_type = AES_CTR;
        cipher_key_len = 16;
        aes_ctr = 1;
        break;
    case RTE_CRYPTO_CIPHER_NULL:
        enc_type = 0;
        is_null = 1;
        break;
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
        enc_type = KASUMI_F8_ECB;
        cipher_key_len = 16;
        zsk_flag = K_F8;
        break;
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
        enc_type = SNOW3G_UEA2;
        cipher_key_len = 16;
        zsk_flag = ZS_EA;
        break;
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        enc_type = ZUC_EEA3;
        cipher_key_len = 16;
        zsk_flag = ZS_EA;
        break;
    case RTE_CRYPTO_CIPHER_AES_XTS:
        enc_type = AES_XTS;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_ECB:
        enc_type = DES3_ECB;
        cipher_key_len = 24;
        break;
    case RTE_CRYPTO_CIPHER_AES_ECB:
        enc_type = AES_ECB;
        cipher_key_len = 16;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CTR:
    case RTE_CRYPTO_CIPHER_AES_F8:
    case RTE_CRYPTO_CIPHER_ARC4:
        CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
                       c_form->algo);
        return -1;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
                       c_form->algo);
        return -1;
    }

    if (c_form->key.length < cipher_key_len) {
        CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
                       (unsigned long)c_form->key.length);
        return -1;
    }

    sess->zsk_flag = zsk_flag;
    sess->aes_gcm = aes_gcm;
    sess->aes_ctr = aes_ctr;
    sess->iv_offset = c_form->iv.offset;
    sess->iv_length = c_form->iv.length;
    sess->is_null = is_null;

    cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
                        c_form->key.length, NULL);

    return 0;
}
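
/*
 * Translate an auth-only xform into the CPT session; AES-GMAC is
 * handled separately by fill_sess_gmac(). zsk_flag records the
 * wireless-algorithm specific handling (K_F9, ZS_IA) needed when the
 * request is built.
 */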
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
               struct cpt_sess_misc *sess)
{
    struct rte_crypto_auth_xform *a_form;
    auth_type_t auth_type = 0; /* NULL Auth type */
    uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

    if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
        return -1;

    a_form = &xform->auth;

    if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
        sess->cpt_op |= CPT_OP_AUTH_VERIFY;
    else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
        sess->cpt_op |= CPT_OP_AUTH_GENERATE;
    else {
        CPT_LOG_DP_ERR("Unknown auth operation");
        return -1;
    }

    if (a_form->key.length > 64) {
        CPT_LOG_DP_ERR("Auth key length is big");
        return -1;
    }

    switch (a_form->algo) {
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
    case RTE_CRYPTO_AUTH_SHA1:
        auth_type = SHA1_TYPE;
        break;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
    case RTE_CRYPTO_AUTH_SHA256:
        auth_type = SHA2_SHA256;
        break;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
    case RTE_CRYPTO_AUTH_SHA512:
        auth_type = SHA2_SHA512;
        break;
    case RTE_CRYPTO_AUTH_AES_GMAC:
        auth_type = GMAC_TYPE;
        aes_gcm = 1;
        break;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
    case RTE_CRYPTO_AUTH_SHA224:
        auth_type = SHA2_SHA224;
        break;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
    case RTE_CRYPTO_AUTH_SHA384:
        auth_type = SHA2_SHA384;
        break;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
    case RTE_CRYPTO_AUTH_MD5:
        auth_type = MD5_TYPE;
        break;
    case RTE_CRYPTO_AUTH_KASUMI_F9:
        auth_type = KASUMI_F9_ECB;
        /*
         * Indicate that direction needs to be taken out
         * from end of src
         */
        zsk_flag = K_F9;
        break;
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
        auth_type = SNOW3G_UIA2;
        zsk_flag = ZS_IA;
        break;
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        auth_type = ZUC_EIA3;
        zsk_flag = ZS_IA;
        break;
    case RTE_CRYPTO_AUTH_NULL:
        auth_type = 0;
        is_null = 1;
        break;
    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
    case RTE_CRYPTO_AUTH_AES_CMAC:
    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
        CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
                       a_form->algo);
        return -1;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
                       a_form->algo);
        return -1;
    }

    sess->zsk_flag = zsk_flag;
    sess->aes_gcm = aes_gcm;
    sess->mac_len = a_form->digest_length;
    sess->is_null = is_null;
    if (zsk_flag) {
        sess->auth_iv_offset = a_form->iv.offset;
        sess->auth_iv_length = a_form->iv.length;
    }
    cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
                        a_form->key.length, a_form->digest_length);

    return 0;
}
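
/*
 * AES-GMAC is configured as AES-GCM with a NULL cipher: the key is
 * programmed on the cipher side and GMAC_TYPE on the hash side.
 */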
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
               struct cpt_sess_misc *sess)
{
    struct rte_crypto_auth_xform *a_form;
    cipher_type_t enc_type = 0; /* NULL Cipher type */
    auth_type_t auth_type = 0; /* NULL Auth type */
    uint8_t zsk_flag = 0, aes_gcm = 0;
    void *ctx;

    if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
        return -1;

    a_form = &xform->auth;

    if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
        sess->cpt_op |= CPT_OP_ENCODE;
    else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
        sess->cpt_op |= CPT_OP_DECODE;
    else {
        CPT_LOG_DP_ERR("Unknown auth operation");
        return -1;
    }

    switch (a_form->algo) {
    case RTE_CRYPTO_AUTH_AES_GMAC:
        enc_type = AES_GCM;
        auth_type = GMAC_TYPE;
        break;
    default:
        CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
                       a_form->algo);
        return -1;
    }

    sess->zsk_flag = zsk_flag;
    sess->aes_gcm = aes_gcm;
    sess->is_gmac = 1;
    sess->iv_offset = a_form->iv.offset;
    sess->iv_length = a_form->iv.length;
    sess->mac_len = a_form->digest_length;
    ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));

    cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
                        a_form->key.length, NULL);
    cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

    return 0;
}
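
/*
 * Allocate a meta buffer for a request: prefer the tailroom of a
 * single-segment source mbuf (the returned pointer is tagged with bit
 * 0 so free_op_meta() knows not to return it to the mempool), falling
 * back to the meta mempool otherwise.
 */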
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
              buf_ptr_t *buf,
              int32_t len,
              struct rte_mempool *cpt_meta_pool)
{
    uint8_t *mdata;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
    if (likely(m_src && (m_src->nb_segs == 1))) {
        int32_t tailroom;
        phys_addr_t mphys;

        /* Check if tailroom is sufficient to hold meta data */
        tailroom = rte_pktmbuf_tailroom(m_src);
        if (likely(tailroom > len + 8)) {
            mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
            mphys = m_src->buf_physaddr + m_src->buf_len;
            mdata -= len;
            mphys -= len;
            buf->vaddr = mdata;
            buf->dma_addr = mphys;
            buf->size = len;
            /* Indicate that this is a mbuf allocated mdata */
            mdata = (uint8_t *)((uint64_t)mdata | 1ull);
            return mdata;
        }
    }
#else
    RTE_SET_USED(m_src);
#endif

    if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
        return NULL;

    buf->vaddr = mdata;
    buf->dma_addr = rte_mempool_virt2iova(mdata);
    buf->size = len;

    return mdata;
}
/*
 * free_op_meta - free meta buffer back to mempool.
 * @param mdata: pointer to the meta buffer (possibly tagged by
 *               alloc_op_meta() when carved from mbuf tailroom).
 * @param cpt_meta_pool: mempool the meta buffer came from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
    bool nofree = ((uintptr_t)mdata & 1ull);

    if (likely(nofree))
        return;
    rte_mempool_put(cpt_meta_pool, mdata);
}
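
/*
 * Flatten an mbuf chain into an iov_ptr_t, optionally skipping
 * start_offset bytes from the head of the chain. Returns 0 on success.
 */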
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
                     iov_ptr_t *iovec, uint32_t start_offset)
{
    uint16_t index = 0;
    void *seg_data = NULL;
    phys_addr_t seg_phys;
    int32_t seg_size = 0;

    if (!start_offset) {
        seg_data = rte_pktmbuf_mtod(pkt, void *);
        seg_phys = rte_pktmbuf_mtophys(pkt);
        seg_size = pkt->data_len;
    } else {
        while (start_offset >= pkt->data_len) {
            start_offset -= pkt->data_len;
            pkt = pkt->next;
        }

        seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
        seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
        seg_size = pkt->data_len - start_offset;
    }

    /* first seg */
    iovec->bufs[index].vaddr = seg_data;
    iovec->bufs[index].dma_addr = seg_phys;
    iovec->bufs[index].size = seg_size;
    index++;
    pkt = pkt->next;

    while (unlikely(pkt != NULL)) {
        seg_data = rte_pktmbuf_mtod(pkt, void *);
        seg_phys = rte_pktmbuf_mtophys(pkt);
        seg_size = pkt->data_len;

        iovec->bufs[index].vaddr = seg_data;
        iovec->bufs[index].dma_addr = seg_phys;
        iovec->bufs[index].size = seg_size;

        index++;
        pkt = pkt->next;
    }

    iovec->buf_cnt = index;
    return 0;
}
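
/*
 * In-place variant: a single-segment mbuf is recorded in param->bufs[0]
 * and flagged SINGLE_BUF_INPLACE (plus SINGLE_BUF_HEADTAILROOM when
 * 24B/8B head/tailroom allow direct mode); chained mbufs fall back to
 * the src_iov list.
 */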
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
                             fc_params_t *param, uint32_t *flags)
{
    uint16_t index = 0;
    void *seg_data = NULL;
    phys_addr_t seg_phys;
    uint32_t seg_size = 0;
    iov_ptr_t *iovec;

    seg_data = rte_pktmbuf_mtod(pkt, void *);
    seg_phys = rte_pktmbuf_mtophys(pkt);
    seg_size = pkt->data_len;

    /* first seg */
    if (likely(!pkt->next)) {
        uint32_t headroom, tailroom;

        *flags |= SINGLE_BUF_INPLACE;
        headroom = rte_pktmbuf_headroom(pkt);
        tailroom = rte_pktmbuf_tailroom(pkt);
        if (likely((headroom >= 24) &&
                   (tailroom >= 8))) {
            /* In 83XX this is a prerequisite for Direct mode */
            *flags |= SINGLE_BUF_HEADTAILROOM;
        }
        param->bufs[0].vaddr = seg_data;
        param->bufs[0].dma_addr = seg_phys;
        param->bufs[0].size = seg_size;
        return 0;
    }
    iovec = param->src_iov;
    iovec->bufs[index].vaddr = seg_data;
    iovec->bufs[index].dma_addr = seg_phys;
    iovec->bufs[index].size = seg_size;
    index++;
    pkt = pkt->next;

    while (unlikely(pkt != NULL)) {
        seg_data = rte_pktmbuf_mtod(pkt, void *);
        seg_phys = rte_pktmbuf_mtophys(pkt);
        seg_size = pkt->data_len;

        iovec->bufs[index].vaddr = seg_data;
        iovec->bufs[index].dma_addr = seg_phys;
        iovec->bufs[index].size = seg_size;

        index++;
        pkt = pkt->next;
    }

    iovec->buf_cnt = index;
    return 0;
}
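
/*
 * Convert an rte_crypto_op into fc_params_t and build the CPT request:
 * resolves IV/salt/AAD/digest pointers, packs d_offs/d_lens (cipher
 * and auth offsets/lengths in one 64-bit word each), prepares src/dst
 * IOVs, allocates the meta buffer and calls the enc/dec prepare
 * routine.
 */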
static __rte_always_inline void *
fill_fc_params(struct rte_crypto_op *cop,
               struct cpt_sess_misc *sess_misc,
               void **mdata_ptr,
               int *op_ret)
{
    uint32_t space = 0;
    struct rte_crypto_sym_op *sym_op = cop->sym;
    void *mdata;
    uintptr_t *op;
    uint32_t mc_hash_off;
    uint32_t flags = 0;
    uint64_t d_offs, d_lens;
    void *prep_req = NULL;
    struct rte_mbuf *m_src, *m_dst;
    uint8_t cpt_op = sess_misc->cpt_op;
    uint8_t zsk_flag = sess_misc->zsk_flag;
    uint8_t aes_gcm = sess_misc->aes_gcm;
    uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
    uint8_t inplace = 0;
#else
    uint8_t inplace = 1;
#endif
    fc_params_t fc_params;
    char src[SRC_IOV_SIZE];
    char dst[SRC_IOV_SIZE];
    uint32_t iv_buf[4];
    struct cptvf_meta_info *cpt_m_info =
        (struct cptvf_meta_info *)(*mdata_ptr);

    if (likely(sess_misc->iv_length)) {
        flags |= VALID_IV_BUF;
        fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
                           uint8_t *, sess_misc->iv_offset);
        if (sess_misc->aes_ctr &&
            unlikely(sess_misc->iv_length != 16)) {
            memcpy((uint8_t *)iv_buf,
                   rte_crypto_op_ctod_offset(cop,
                           uint8_t *, sess_misc->iv_offset), 12);
            iv_buf[3] = rte_cpu_to_be_32(0x1);
            fc_params.iv_buf = iv_buf;
        }
    }

    if (zsk_flag) {
        fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
                                uint8_t *,
                                sess_misc->auth_iv_offset);
        if (zsk_flag == K_F9) {
            CPT_LOG_DP_ERR("Should not reach here for "
                           "kasumi F9\n");
        }
        if (zsk_flag != ZS_EA)
            inplace = 0;
    }
    m_src = sym_op->m_src;
    m_dst = sym_op->m_dst;

    if (aes_gcm) {
        uint8_t *salt;
        uint8_t *aad_data;
        uint16_t aad_len;

        d_offs = sym_op->aead.data.offset;
        d_lens = sym_op->aead.data.length;
        mc_hash_off = sym_op->aead.data.offset +
                      sym_op->aead.data.length;

        aad_data = sym_op->aead.aad.data;
        aad_len = sess_misc->aad_length;
        if (likely((aad_data + aad_len) ==
                   rte_pktmbuf_mtod_offset(m_src,
                           uint8_t *,
                           sym_op->aead.data.offset))) {
            d_offs = (d_offs - aad_len) | (d_offs << 16);
            d_lens = (d_lens + aad_len) | (d_lens << 32);
        } else {
            fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
            fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
            fc_params.aad_buf.size = aad_len;
            flags |= VALID_AAD_BUF;
            inplace = 0;
            d_offs = d_offs << 16;
            d_lens = d_lens << 32;
        }

        salt = fc_params.iv_buf;
        if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
            cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
            sess_misc->salt = *(uint32_t *)salt;
        }
        fc_params.iv_buf = salt + 4;
        if (likely(mac_len)) {
            struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
                                 m_src;

            if (!m)
                m = m_src;

            /* hmac immediately following data is best case */
            if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
                         mc_hash_off !=
                         (uint8_t *)sym_op->aead.digest.data)) {
                flags |= VALID_MAC_BUF;
                fc_params.mac_buf.size = sess_misc->mac_len;
                fc_params.mac_buf.vaddr =
                    sym_op->aead.digest.data;
                fc_params.mac_buf.dma_addr =
                    sym_op->aead.digest.phys_addr;
                inplace = 0;
            }
        }
    } else {
        d_offs = sym_op->cipher.data.offset;
        d_lens = sym_op->cipher.data.length;
        mc_hash_off = sym_op->cipher.data.offset +
                      sym_op->cipher.data.length;
        d_offs = (d_offs << 16) | sym_op->auth.data.offset;
        d_lens = (d_lens << 32) | sym_op->auth.data.length;

        if (mc_hash_off < (sym_op->auth.data.offset +
                           sym_op->auth.data.length)) {
            mc_hash_off = (sym_op->auth.data.offset +
                           sym_op->auth.data.length);
        }
        /* for gmac, salt should be updated like in gcm */
        if (unlikely(sess_misc->is_gmac)) {
            uint8_t *salt;

            salt = fc_params.iv_buf;
            if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
                cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
                sess_misc->salt = *(uint32_t *)salt;
            }
            fc_params.iv_buf = salt + 4;
        }
        if (likely(mac_len)) {
            struct rte_mbuf *m;

            m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
            if (!m)
                m = m_src;

            /* hmac immediately following data is best case */
            if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
                         mc_hash_off !=
                         (uint8_t *)sym_op->auth.digest.data)) {
                flags |= VALID_MAC_BUF;
                fc_params.mac_buf.size =
                    sess_misc->mac_len;
                fc_params.mac_buf.vaddr =
                    sym_op->auth.digest.data;
                fc_params.mac_buf.dma_addr =
                    sym_op->auth.digest.phys_addr;
                inplace = 0;
            }
        }
    }
    fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
    fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

    if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
        inplace = 0;

    if (likely(!m_dst && inplace)) {
        /* Case of single buffer without AAD buf or
         * separate mac buf in place and
         * not air crypto
         */
        fc_params.dst_iov = fc_params.src_iov = (void *)src;

        if (unlikely(prepare_iov_from_pkt_inplace(m_src,
                                                  &fc_params,
                                                  &flags))) {
            CPT_LOG_DP_ERR("Prepare inplace src iov failed");
            *op_ret = -1;
            return NULL;
        }
    } else {
        /* Out of place processing */
        fc_params.src_iov = (void *)src;
        fc_params.dst_iov = (void *)dst;

        /* Store SG I/O in the api for reuse */
        if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
            CPT_LOG_DP_ERR("Prepare src iov failed");
            *op_ret = -1;
            return NULL;
        }

        if (unlikely(m_dst != NULL)) {
            uint32_t pkt_len;

            /* Try to make room as much as src has */
            m_dst = sym_op->m_dst;
            pkt_len = rte_pktmbuf_pkt_len(m_dst);

            if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
                pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
                if (!rte_pktmbuf_append(m_dst, pkt_len)) {
                    CPT_LOG_DP_ERR("Not enough space in "
                                   "m_dst %p, need %u"
                                   " more",
                                   m_dst, pkt_len);
                    return NULL;
                }
            }

            if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
                CPT_LOG_DP_ERR("Prepare dst iov failed for "
                               "m_dst %p", m_dst);
                return NULL;
            }
        } else {
            fc_params.dst_iov = (void *)src;
        }
    }

    if (likely(flags & SINGLE_BUF_HEADTAILROOM))
        mdata = alloc_op_meta(m_src,
                              &fc_params.meta_buf,
                              cpt_m_info->cptvf_op_sb_mlen,
                              cpt_m_info->cptvf_meta_pool);
    else
        mdata = alloc_op_meta(NULL,
                              &fc_params.meta_buf,
                              cpt_m_info->cptvf_op_mlen,
                              cpt_m_info->cptvf_meta_pool);

    if (unlikely(mdata == NULL)) {
        CPT_LOG_DP_ERR("Error allocating meta buffer for request");
        return NULL;
    }

    op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
    op[0] = (uintptr_t)mdata;
    op[1] = (uintptr_t)cop;
    op[2] = op[3] = 0; /* Used to indicate auth verify */
    space += 4 * sizeof(uint64_t);

    fc_params.meta_buf.vaddr = (uint8_t *)op + space;
    fc_params.meta_buf.dma_addr += space;
    fc_params.meta_buf.size -= space;

    /* Finally prepare the instruction */
    if (cpt_op & CPT_OP_ENCODE)
        prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
                                        &fc_params, op, op_ret);
    else
        prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
                                        &fc_params, op, op_ret);

    if (unlikely(!prep_req))
        free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
    *mdata_ptr = mdata;
    return prep_req;
}
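
/*
 * Walk the xform chain and fill the session private data; AES-GMAC is
 * routed to fill_sess_gmac(), other auth xforms to fill_sess_auth().
 */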
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
    struct rte_crypto_sym_xform *chain;

    CPT_PMD_INIT_FUNC_TRACE();

    if (cpt_is_algo_supported(xform))
        goto err;

    chain = xform;
    while (chain) {
        switch (chain->type) {
        case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (fill_sess_aead(chain, sess))
                goto err;
            break;
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (fill_sess_cipher(chain, sess))
                goto err;
            break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
                if (fill_sess_gmac(chain, sess))
                    goto err;
            } else {
                if (fill_sess_auth(chain, sess))
                    goto err;
            }
            break;
        default:
            CPT_LOG_DP_ERR("Invalid crypto xform type");
            goto err;
        }
        chain = chain->next;
    }

    return 0;

err:
    return -1;
}
#endif /* _CPT_UCODE_H_ */