#ifndef _CPT_UCODE_H_
#define _CPT_UCODE_H_
-
#include <stdbool.h>
#include "cpt_common.h"
return (uint32_t)i;
}
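+
+/*
+ * Prepare a HASH/HMAC (digest generation) request in scatter-gather mode.
+ * The caller-supplied meta buffer is carved into the completion code area,
+ * the cpt_request_info and the gather/scatter lists; the prepared request
+ * is returned through prep_req.
+ */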
+static __rte_always_inline int
+cpt_digest_gen_prep(uint32_t flags,
+ uint64_t d_lens,
+ digest_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ struct cpt_request_info *req;
+ uint32_t size, i;
+ int32_t m_size;
+ uint16_t data_len, mac_len, key_len;
+ auth_type_t hash_type;
+ buf_ptr_t *meta_p;
+ struct cpt_ctx *ctx;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr, *m_vaddr;
+ uint64_t c_dma, m_dma;
+ opcode_info_t opcode;
+
+ if (!params || !params->ctx_buf.vaddr)
+ return ERR_BAD_INPUT_ARG;
+
+ ctx = params->ctx_buf.vaddr;
+ meta_p = &params->meta_buf;
+
+ if (!meta_p->vaddr || !meta_p->dma_addr)
+ return ERR_BAD_INPUT_ARG;
+
+ if (meta_p->size < sizeof(struct cpt_request_info))
+ return ERR_BAD_INPUT_ARG;
+
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+ m_size = meta_p->size;
+
+ /*
+ * Save the initial space that follows the app data for the completion
+ * code and alternate completion code, so that they fall in the same
+ * cache line as the app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ hash_type = ctx->hash_type;
+ mac_len = ctx->mac_len;
+ key_len = ctx->auth_key_len;
+ data_len = AUTH_DLEN(d_lens);
+
+ /* GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
+ if (ctx->hmac) {
+ opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
+ vq_cmd_w0.s.dlen =
+ rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
+ } else {
+ opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = 0;
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
+ }
+
+ opcode.s.minor = 0;
+
+ /* Only the NULL-auth, no-cipher case enters this block */
+ if (unlikely(!hash_type && !ctx->enc_cipher)) {
+ opcode.s.major = CPT_MAJOR_OP_MISC;
+ /* Minor op is passthrough */
+ opcode.s.minor = 0x03;
+ /* Send out completion code only */
+ vq_cmd_w0.s.param2 = 0x1;
+ }
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
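+ /*
+ * The first 8 bytes of the SG area form a header; the 16-bit gather
+ * and scatter entry counts are written into words [2] and [3] below
+ */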
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input gather list
+ */
+
+ i = 0;
+
+ if (ctx->hmac) {
+ uint64_t k_dma = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, auth_key);
+ /* Key */
+ i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+ }
+
+ /* input data */
+ size = data_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
+ 0, &size, NULL, 0);
+ if (size) {
+ CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
+ " by %dB", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ } else {
+ /*
+ * Even with zero data, one (empty) gather entry is still
+ * consumed for the hash & HMAC case
+ */
+ i++;
+ }
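+
+ /*
+ * Each sg_comp_t holds up to four entries, hence the byte size is
+ * rounded up to a multiple of four entries below
+ */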
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output scatter list
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags & VALID_MAC_BUF) {
+ if (params->mac_buf.size < mac_len)
+ return ERR_BAD_INPUT_ARG;
+
+ size = mac_len;
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ &params->mac_buf, &size);
+ } else {
+ size = mac_len;
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->src_iov, data_len,
+ &size, NULL, 0);
+ if (size) {
+ CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by"
+ " %dB", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
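+ /*
+ * The combined gather/scatter list is passed through DPTR; RPTR is
+ * pointed at the alternate completion word
+ */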
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
+
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags,
uint64_t d_offs,
} else if (fc_type == KASUMI) {
ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
fc_params, op, &prep_req);
+ } else if (fc_type == HASH_HMAC) {
+ ret = cpt_digest_gen_prep(flags, d_lens, fc_params, op,
+ &prep_req);
} else {
ret = ERR_EIO;
}
return -1;
}
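+
+/*
+ * KASUMI F9 input ends with a direction bit, a '1' stop bit and zero padding
+ * up to a byte boundary. Scan backwards from the end of the buffer to find
+ * the stop bit, then recover the direction bit and the bit length that
+ * precedes it.
+ */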
+static __rte_always_inline void
+find_kasumif9_direction_and_length(uint8_t *src,
+ uint32_t counter_num_bytes,
+ uint32_t *addr_length_in_bits,
+ uint8_t *addr_direction)
+{
+ uint8_t found = 0;
+ while (!found && counter_num_bytes > 0) {
+ counter_num_bytes--;
+ if (src[counter_num_bytes] == 0x00)
+ continue;
+ if (src[counter_num_bytes] == 0x80) {
+ *addr_direction = src[counter_num_bytes - 1] & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8 - 1;
+ found = 1;
+ } else {
+ int i = 0;
+ uint8_t last_byte = src[counter_num_bytes];
+ for (i = 0; i < 8 && found == 0; i++) {
+ if (last_byte & (1 << i)) {
+ *addr_direction = (last_byte >> (i+1))
+ & 0x1;
+ if (i != 6)
+ *addr_length_in_bits =
+ counter_num_bytes * 8
+ + (8 - (i + 2));
+ else
+ *addr_length_in_bits =
+ counter_num_bytes * 8;
+ found = 1;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * This handles all auth-only cases except AES_GMAC
+ */
+static __rte_always_inline void *
+fill_digest_params(struct rte_crypto_op *cop,
+ struct cpt_sess_misc *sess,
+ void **mdata_ptr,
+ int *op_ret)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata;
+ phys_addr_t mphys;
+ uint64_t *op;
+ uint32_t auth_range_off;
+ uint32_t flags = 0;
+ uint64_t d_offs = 0, d_lens;
+ void *prep_req = NULL;
+ struct rte_mbuf *m_src, *m_dst;
+ uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
+ uint8_t zsk_flag = sess->zsk_flag;
+ uint16_t mac_len = sess->mac_len;
+ fc_params_t params;
+ char src[SRC_IOV_SIZE];
+ uint8_t iv_buf[16];
+ struct cptvf_meta_info *cpt_m_info =
+ (struct cptvf_meta_info *)(*mdata_ptr);
+
+ memset(&params, 0, sizeof(fc_params_t));
+
+ m_src = sym_op->m_src;
+
+ /* For a digest-only op, force the meta buffer to come from the mempool */
+ mdata = alloc_op_meta(NULL, ¶ms.meta_buf, cpt_m_info->cptvf_op_mlen,
+ cpt_m_info->cptvf_meta_pool);
+ if (mdata == NULL) {
+ CPT_LOG_DP_ERR("Error allocating meta buffer for request");
+ *op_ret = -ENOMEM;
+ return NULL;
+ }
+
+ mphys = params.meta_buf.dma_addr;
+
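+ /*
+ * The first four 64-bit words of the meta buffer are reserved for the
+ * driver: meta address, crypto op pointer and, for auth verify, the
+ * address and length of the MAC generated below
+ */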
+ op = mdata;
+ op[0] = (uintptr_t)mdata;
+ op[1] = (uintptr_t)cop;
+ op[2] = op[3] = 0; /* Used to indicate auth verify */
+ space += 4 * sizeof(uint64_t);
+
+ auth_range_off = sym_op->auth.data.offset;
+
+ flags = VALID_MAC_BUF;
+ params.src_iov = (void *)src;
+ if (unlikely(zsk_flag)) {
+ /*
+ * For ZUC, KASUMI and SNOW 3G the offsets are in bits, so pass
+ * them through unchanged even in the auth-only case and let the
+ * microcode handle them
+ */
+ d_offs = auth_range_off;
+ auth_range_off = 0;
+ params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess->auth_iv_offset);
+ if (zsk_flag == K_F9) {
+ uint32_t length_in_bits, num_bytes;
+ uint8_t *src, direction = 0;
+ uint32_t counter_num_bytes;
+
+ memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
+ uint8_t *), 8);
+ /*
+ * This is kasumi f9, take direction from
+ * source buffer
+ */
+ length_in_bits = cop->sym->auth.data.length;
+ num_bytes = (length_in_bits >> 3);
+ counter_num_bytes = num_bytes;
+ src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
+ find_kasumif9_direction_and_length(src,
+ counter_num_bytes,
+ &length_in_bits,
+ &direction);
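+ /*
+ * The first 64 bits are COUNT || FRESH (already copied to iv_buf);
+ * drop them from the auth range and pass them via the auth IV
+ */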
+ length_in_bits -= 64;
+ cop->sym->auth.data.offset += 64;
+ d_offs = cop->sym->auth.data.offset;
+ auth_range_off = d_offs / 8;
+ cop->sym->auth.data.length = length_in_bits;
+
+ /* Store it at end of auth iv */
+ iv_buf[8] = direction;
+ params.auth_iv_buf = iv_buf;
+ }
+ }
+
+ d_lens = sym_op->auth.data.length;
+
+ params.ctx_buf.vaddr = SESS_PRIV(sess);
+ params.ctx_buf.dma_addr = sess->ctx_dma_addr;
+
+ if (auth_op == CPT_OP_AUTH_GENERATE) {
+ if (sym_op->auth.digest.data) {
+ /*
+ * Digest to be generated
+ * in separate buffer
+ */
+ params.mac_buf.size =
+ sess->mac_len;
+ params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ params.mac_buf.dma_addr =
+ sym_op->auth.digest.phys_addr;
+ } else {
+ uint32_t off = sym_op->auth.data.offset +
+ sym_op->auth.data.length;
+ int32_t dlen, space;
+
+ m_dst = sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src;
+ dlen = rte_pktmbuf_pkt_len(m_dst);
+
+ space = off + mac_len - dlen;
+ if (space > 0 &&
+ !rte_pktmbuf_append(m_dst, space)) {
+ CPT_LOG_DP_ERR("Failed to extend "
+ "mbuf by %uB", space);
+ goto err;
+ }
+
+ params.mac_buf.vaddr =
+ rte_pktmbuf_mtod_offset(m_dst, void *, off);
+ params.mac_buf.dma_addr =
+ rte_pktmbuf_mtophys_offset(m_dst, off);
+ params.mac_buf.size = mac_len;
+ }
+ } else {
+ /* Need space for storing generated mac */
+ params.mac_buf.vaddr = (uint8_t *)mdata + space;
+ params.mac_buf.dma_addr = mphys + space;
+ params.mac_buf.size = mac_len;
+ space += RTE_ALIGN_CEIL(mac_len, 8);
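+ /*
+ * Record the MAC location so the dequeue path can compare it
+ * against the application's digest
+ */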
+ op[2] = (uintptr_t)params.mac_buf.vaddr;
+ op[3] = mac_len;
+ }
+
+ params.meta_buf.vaddr = (uint8_t *)mdata + space;
+ params.meta_buf.dma_addr = mphys + space;
+ params.meta_buf.size -= space;
+
+ /* Out of place processing */
+ params.src_iov = (void *)src;
+
+ /* Store SG I/O in the API for reuse */
+ if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ *op_ret = -1;
+ goto err;
+ }
+
+ prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &params, op, op_ret);
+ *mdata_ptr = mdata;
+ return prep_req;
+err:
+ if (unlikely(!prep_req))
+ free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+ return NULL;
+}
+
#endif /*_CPT_UCODE_H_ */