* Copyright(C) 2020 Marvell International Ltd.
*/
+#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
+#include <rte_spinlock.h>
#include "otx2_common.h"
#include "otx2_sec_idev.h"
+static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
+
/**
* @internal
* Check if rte_eth_dev is security offload capable otx2_eth_dev
return 0;
}
+
+int
+otx2_sec_idev_cfg_init(int port_id)
+{
+	struct otx2_sec_idev_cfg *cfg;
+	int i;
+
+	/* Validate before indexing the static sec_cfg[] table; the other
+	 * entry points (qp_add) check their port_id, keep this consistent.
+	 */
+	if (port_id < 0 || port_id >= OTX2_MAX_INLINE_PORTS)
+		return -EINVAL;
+
+	/* Reset the per-port inline config: no CPT QPs attached yet */
+	cfg = &sec_cfg[port_id];
+	cfg->tx_cpt_idx = 0;
+	rte_spinlock_init(&cfg->tx_cpt_lock);
+
+	for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+		cfg->tx_cpt[i].qp = NULL;
+		rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
+	}
+
+	return 0;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
+{
+	struct otx2_sec_idev_cfg *cfg;
+	int i, ret;
+
+	/* sec_cfg[] has OTX2_MAX_INLINE_PORTS entries, so port_id must be
+	 * strictly less than the limit ('>' here was an off-by-one that
+	 * allowed an out-of-bounds access at port_id == MAX).
+	 */
+	if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
+		return -EINVAL;
+
+	cfg = &sec_cfg[port_id];
+
+	/* Find a free slot to save CPT LF */
+
+	rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+	for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+		if (cfg->tx_cpt[i].qp == NULL) {
+			cfg->tx_cpt[i].qp = qp;
+			ret = 0;
+			goto unlock;
+		}
+	}
+
+	/* All slots occupied for this port */
+	ret = -EINVAL;
+
+unlock:
+	rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	return ret;
+}
+
+/* Detach a CPT QP from whichever port it was registered on.
+ *
+ * Scans every port's slot table for @qp. Returns 0 on successful removal,
+ * -EBUSY if the QP is still referenced by a security session, -ENOENT if
+ * the QP is not registered anywhere, -EINVAL on NULL input.
+ *
+ * Locking: each port's tx_cpt_lock is taken only while that port's table
+ * is scanned; the two unlock paths (inline and the 'unlock' label) together
+ * guarantee the lock is always released.
+ */
+int
+otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
+{
+	struct otx2_sec_idev_cfg *cfg;
+	uint16_t port_id;
+	int i, ret;
+
+	if (qp == NULL)
+		return -EINVAL;
+
+	for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+		cfg = &sec_cfg[port_id];
+
+		rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+		for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+			if (cfg->tx_cpt[i].qp != qp)
+				continue;
+
+			/* Don't free if the QP is in use by any sec session */
+			if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
+				ret = -EBUSY;
+			} else {
+				cfg->tx_cpt[i].qp = NULL;
+				ret = 0;
+			}
+
+			/* Found (busy or freed) - release lock and return */
+			goto unlock;
+		}
+
+		rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	}
+
+	/* QP was not registered on any port */
+	return -ENOENT;
+
+unlock:
+	rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	return ret;
+}
#include <rte_ethdev.h>
+#define OTX2_MAX_CPT_QP_PER_PORT 64
+#define OTX2_MAX_INLINE_PORTS 64
+
+struct otx2_cpt_qp;
+
+/* Per-port inline IPsec configuration shared between the crypto (CPT) and
+ * ethernet (security) drivers.
+ */
+struct otx2_sec_idev_cfg {
+	struct {
+		struct otx2_cpt_qp *qp;
+		/**< CPT queue pair registered for inline Tx, NULL if slot free */
+		rte_atomic16_t ref_cnt;
+		/**< Number of security sessions using this QP */
+	} tx_cpt[OTX2_MAX_CPT_QP_PER_PORT];
+
+	uint16_t tx_cpt_idx;
+	/**< Index used to pick the next tx_cpt slot */
+	rte_spinlock_t tx_cpt_lock;
+	/**< Protects the tx_cpt[] slot table */
+};
+
uint8_t otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev);
+int otx2_sec_idev_cfg_init(int port_id);
+
+int otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp);
+
+int otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp);
+
#endif /* _OTX2_SEC_IDEV_H_ */
otx2_npa_pf_func_get;
otx2_npa_set_defaults;
otx2_register_irq;
+ otx2_sec_idev_cfg_init;
+ otx2_sec_idev_tx_cpt_qp_add;
+ otx2_sec_idev_tx_cpt_qp_remove;
otx2_sso_pf_func_get;
otx2_sso_pf_func_set;
otx2_unregister_irq;
#include "cpt_mcode_defines.h"
#include "otx2_dev.h"
+#include "otx2_cryptodev_qp.h"
/* CPT instruction queue length */
#define OTX2_CPT_IQ_LEN 8200
CPT_9X_COMP_E_LAST_ENTRY = 0x06
};
-struct otx2_cpt_qp {
- uint32_t id;
- /**< Queue pair id */
- uintptr_t base;
- /**< Base address where BAR is mapped */
- void *lmtline;
- /**< Address of LMTLINE */
- rte_iova_t lf_nq_reg;
- /**< LF enqueue register address */
- struct pending_queue pend_q;
- /**< Pending queue */
- struct rte_mempool *sess_mp;
- /**< Session mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session private data mempool */
- struct cpt_qp_meta_info meta_info;
- /**< Metabuf info required to support operations on the queue pair */
- rte_iova_t iq_dma_addr;
- /**< Instruction queue address */
-};
-
void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);
if (ret)
return ret;
+ /* Publish inline Tx QP to eth dev security */
+ ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
+ if (ret)
+ return ret;
+
return 0;
}
qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ goto mempool_destroy;
+ }
+
otx2_cpt_iq_disable(qp);
ret = otx2_cpt_qp_inline_cfg(dev, qp);
char name[RTE_MEMZONE_NAMESIZE];
int ret;
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ return ret;
+ }
+
otx2_cpt_iq_disable(qp);
otx2_cpt_metabuf_mempool_destroy(qp);
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_QP_H_
+#define _OTX2_CRYPTODEV_QP_H_
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
+
+#include "cpt_common.h"
+
+/* CPT queue pair state, moved to its own header so the common/security
+ * code can reference it without pulling in the full cryptodev internals.
+ */
+struct otx2_cpt_qp {
+	uint32_t id;
+	/**< Queue pair id */
+	uintptr_t base;
+	/**< Base address where BAR is mapped */
+	void *lmtline;
+	/**< Address of LMTLINE */
+	rte_iova_t lf_nq_reg;
+	/**< LF enqueue register address */
+	struct pending_queue pend_q;
+	/**< Pending queue */
+	struct rte_mempool *sess_mp;
+	/**< Session mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session private data mempool */
+	struct cpt_qp_meta_info meta_info;
+	/**< Metabuf info required to support operations on the queue pair */
+	rte_iova_t iq_dma_addr;
+	/**< Instruction queue address */
+};
+
+#endif /* _OTX2_CRYPTODEV_QP_H_ */
#include <rte_security.h>
#include <rte_security_driver.h>
+#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
+#include "otx2_sec_idev.h"
#define ETH_SEC_MAX_PKT_LEN 1450
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
struct rte_security_ctx *ctx;
+ int ret;
ctx = rte_malloc("otx2_eth_sec_ctx",
sizeof(struct rte_security_ctx), 0);
if (ctx == NULL)
return -ENOMEM;
+ ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
+ if (ret) {
+ rte_free(ctx);
+ return ret;
+ }
+
/* Populate ctx */
ctx->device = eth_dev;