#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
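+/* Upper bound on AAD bytes for AEAD sessions; longer AADs are rejected at session setup. */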
+#define FLEXI_CRYPTO_MAX_AAD_LEN 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
}
}
break;
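+	/* An AEAD xform needs no chaining; map it straight to the combined cipher+auth path. */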
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ res = NITROX_CHAIN_COMBINED;
+ break;
default:
break;
}
type = CIPHER_AES_CBC;
*is_aes = true;
break;
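+	/* 3DES-CBC: mark as non-AES so the AES key-length encoding is skipped. */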
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ type = CIPHER_3DES_CBC;
+ *is_aes = false;
+ break;
default:
type = CIPHER_INVALID;
NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
}
static bool
-auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
- struct flexi_crypto_context *fctx)
+auth_key_is_valid(const uint8_t *data, uint16_t length,
+ struct flexi_crypto_context *fctx)
{
- if (unlikely(!xform->key.data && xform->key.length)) {
+ if (unlikely(!data && length)) {
NITROX_LOG(ERR, "Invalid auth key\n");
return false;
}
- if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
+ if (unlikely(length > sizeof(fctx->auth.opad))) {
NITROX_LOG(ERR, "Invalid auth key length %d\n",
- xform->key.length);
+ length);
return false;
}
if (unlikely(type == AUTH_INVALID))
return -ENOTSUP;
- if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
+ if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
+ fctx)))
return -EINVAL;
- ctx->auth_op = xform->op;
- ctx->auth_algo = xform->algo;
ctx->digest_length = xform->digest_length;
fctx->flags = rte_be_to_cpu_64(fctx->flags);
return 0;
}
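+/*
+ * Program the flexi crypto context for AES-GCM: validate the AAD, key and
+ * IV limits, then fill in the cipher half of the context. hash_type is left
+ * as AUTH_NULL and the tag length is carried in mac_len.
+ */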
+static int
+configure_aead_ctx(struct rte_crypto_aead_xform *xform,
+ struct nitrox_crypto_ctx *ctx)
+{
+ int aes_keylen;
+ struct flexi_crypto_context *fctx = &ctx->fctx;
+
+ if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
+ NITROX_LOG(ERR, "AAD length %d not supported\n",
+ xform->aad_length);
+ return -ENOTSUP;
+ }
+
+ if (unlikely(xform->algo != RTE_CRYPTO_AEAD_AES_GCM))
+ return -ENOTSUP;
+
+ aes_keylen = flexi_aes_keylen(xform->key.length, true);
+ if (unlikely(aes_keylen < 0))
+ return -EINVAL;
+
+ if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
+ fctx)))
+ return -EINVAL;
+
+ if (unlikely(xform->iv.length > MAX_IV_LEN))
+ return -EINVAL;
+
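+	/* flags is kept big-endian in memory; flip to CPU order, edit the w0 bitfields, flip back below. */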
+ fctx->flags = rte_be_to_cpu_64(fctx->flags);
+ fctx->w0.cipher_type = CIPHER_AES_GCM;
+ fctx->w0.aes_keylen = aes_keylen;
+ fctx->w0.iv_source = IV_FROM_DPTR;
+ fctx->w0.hash_type = AUTH_NULL;
+ fctx->w0.auth_input_type = 1;
+ fctx->w0.mac_len = xform->digest_length;
+ fctx->flags = rte_cpu_to_be_64(fctx->flags);
+ memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
+ memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
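+	/* GCM has no separate auth key; mirror the cipher key into the auth opad slot as well. */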
+ memset(&fctx->auth, 0, sizeof(fctx->auth));
+ memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
+
+ ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ ctx->req_op = (xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
+ ctx->iv.offset = xform->iv.offset;
+ ctx->iv.length = xform->iv.length;
+ ctx->digest_length = xform->digest_length;
+ ctx->aad_length = xform->aad_length;
+ return 0;
+}
+
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
struct rte_crypto_sym_xform *xform,
struct nitrox_crypto_ctx *ctx;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ int ret = -EINVAL;
if (rte_mempool_get(mempool, &mp_obj)) {
NITROX_LOG(ERR, "Couldn't allocate context\n");
ctx = mp_obj;
ctx->nitrox_chain = get_crypto_chain_order(xform);
switch (ctx->nitrox_chain) {
+ case NITROX_CHAIN_CIPHER_ONLY:
+ cipher_xform = &xform->cipher;
+ break;
case NITROX_CHAIN_CIPHER_AUTH:
cipher_xform = &xform->cipher;
auth_xform = &xform->next->auth;
auth_xform = &xform->auth;
cipher_xform = &xform->next->cipher;
break;
+ case NITROX_CHAIN_COMBINED:
+ aead_xform = &xform->aead;
+ break;
default:
NITROX_LOG(ERR, "Crypto chain not supported\n");
+ ret = -ENOTSUP;
goto err;
}
goto err;
}
+	if (aead_xform) {
+		ret = configure_aead_ctx(aead_xform, ctx);
+		if (unlikely(ret)) {
+			NITROX_LOG(ERR, "Failed to configure aead ctx\n");
+			goto err;
+		}
+	}
+
ctx->iova = rte_mempool_virt2iova(ctx);
set_sym_session_private_data(sess, cdev->driver_id, ctx);
return 0;
err:
rte_mempool_put(mempool, mp_obj);
- return -EINVAL;
+ return ret;
}
static void
rte_mempool_put(sess_mp, ctx);
}
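+/* Only session-based ops are supported; sessionless ops resolve to NULL and are rejected by the caller. */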
+static struct nitrox_crypto_ctx *
+get_crypto_ctx(struct rte_crypto_op *op)
+{
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session))
+ return get_sym_session_private_data(op->sym->session,
+ nitrox_sym_drv_id);
+ }
+
+ return NULL;
+}
+
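+/*
+ * Build and post one SE request: resolve the session context, grab a
+ * softreq from the per-queue mempool, and place the instruction in the
+ * ring. The doorbell is rung once per burst by the caller.
+ */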
+static int
+nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
+{
+ struct nitrox_crypto_ctx *ctx;
+ struct nitrox_softreq *sr;
+ int err;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ ctx = get_crypto_ctx(op);
+ if (unlikely(!ctx)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
+ return -ENOMEM;
+
+ err = nitrox_process_se_req(qp->qno, op, ctx, sr);
+ if (unlikely(err)) {
+ rte_mempool_put(qp->sr_mp, sr);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return err;
+ }
+
+ nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
+ return 0;
+}
+
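+/* Clamp the burst to the free ring slots, stop at the first failed op, and ring the doorbell once for everything enqueued. */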
+static uint16_t
+nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct nitrox_qp *qp = queue_pair;
+ uint16_t free_slots = 0;
+ uint16_t cnt = 0;
+ bool err = false;
+
+ free_slots = nitrox_qp_free_count(qp);
+ if (nb_ops > free_slots)
+ nb_ops = free_slots;
+
+ for (cnt = 0; cnt < nb_ops; cnt++) {
+ if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
+ err = true;
+ break;
+ }
+ }
+
+ nitrox_ring_dbell(qp, cnt);
+ qp->stats.enqueued_count += cnt;
+ if (unlikely(err))
+ qp->stats.enqueue_err_count++;
+
+ return cnt;
+}
+
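+/*
+ * Poll the oldest softreq; -EAGAIN if it has not completed yet. On
+ * completion, translate the microcode status (MAC mismatch vs. generic
+ * error) and recycle the softreq.
+ */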
+static int
+nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
+{
+ struct nitrox_softreq *sr;
+ int ret;
+ struct rte_crypto_op *op;
+
+ sr = nitrox_qp_get_softreq(qp);
+ ret = nitrox_check_se_req(sr, op_ptr);
+ if (ret < 0)
+ return -EAGAIN;
+
+ op = *op_ptr;
+ nitrox_qp_dequeue(qp);
+ rte_mempool_put(qp->sr_mp, sr);
+ if (!ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ qp->stats.dequeued_count++;
+
+ return 0;
+ }
+
+ if (ret == MC_MAC_MISMATCH_ERR_CODE)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ qp->stats.dequeue_err_count++;
+ return 0;
+}
+
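+/* Completions arrive in ring order, so stop at the first request still in flight. */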
+static uint16_t
+nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct nitrox_qp *qp = queue_pair;
+ uint16_t filled_slots = nitrox_qp_used_count(qp);
+ int cnt = 0;
+
+ if (nb_ops > filled_slots)
+ nb_ops = filled_slots;
+
+ for (cnt = 0; cnt < nb_ops; cnt++)
+ if (nitrox_deq_single_op(qp, &ops[cnt]))
+ break;
+
+ return cnt;
+}
+
static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
.dev_configure = nitrox_sym_dev_config,
.dev_start = nitrox_sym_dev_start,
struct rte_cryptodev *cdev;
rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
- snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN, "_n5sym");
+ snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name),
+ "_n5sym");
ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
ndev->rte_sym_dev.devargs = NULL;
ndev->rte_sym_dev.name = cdev->data->name;
cdev->driver_id = nitrox_sym_drv_id;
cdev->dev_ops = &nitrox_cryptodev_ops;
- cdev->enqueue_burst = NULL;
- cdev->dequeue_burst = NULL;
+ cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
+ cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
ndev->sym_dev = cdev->data->dev_private;
ndev->sym_dev->cdev = cdev;