--- a/drivers/crypto/octeontx2/Makefile
+++ b/drivers/crypto/octeontx2/Makefile
# build flags
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_pci -lrte_bus_pci
LDLIBS += -lrte_common_cpt -lrte_common_octeontx2
CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
CFLAGS += -DALLOW_EXPERIMENTAL_API
ifneq ($(CONFIG_RTE_ARCH_64),y)

--- a/drivers/crypto/octeontx2/meson.build
+++ b/drivers/crypto/octeontx2/meson.build
deps += ['bus_pci']
deps += ['common_cpt']
deps += ['common_octeontx2']
+deps += ['ethdev']
name = 'octeontx2_crypto'
allow_experimental_apis = true
includes += include_directories('../../common/cpt')
includes += include_directories('../../common/octeontx2')
includes += include_directories('../../mempool/octeontx2')
+includes += include_directories('../../net/octeontx2')
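Both build systems now pull in the ethdev library and the net/octeontx2 include path because the mailbox code below dereferences the net PMD's private data through otx2_eth_pmd_priv(). That accessor lives in otx2_ethdev.h, not in this patch; it is presumably the usual one-line private-data cast, sketched here only to show what the new header dependency is for:

/* Presumed shape of the accessor from drivers/net/octeontx2/otx2_ethdev.h */
static inline struct otx2_eth_dev *
otx2_eth_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}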
--- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
 * Copyright (C) 2019 Marvell International Ltd.
 */
#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_dev.h"
+#include "otx2_ethdev.h"
+#include "otx2_sec_idev.h"
#include "otx2_mbox.h"
#include "cpt_pmd_logs.h"
@@ ... @@
	return otx2_cpt_send_mbox_msg(vf);
}
+
+int
+otx2_cpt_inline_init(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_rx_inline_lf_cfg_msg *msg;
+	int ret;
+
+	msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+	if (msg == NULL)
+		return -ENOMEM;
+
+	msg->sso_pf_func = otx2_sso_pf_func_get();
+
+	otx2_mbox_msg_send(mbox, 0);
+	ret = otx2_mbox_process(mbox);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
+
+int
+otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
+			uint16_t port_id)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_inline_ipsec_cfg_msg *msg;
+	struct otx2_eth_dev *otx2_eth_dev;
+	int ret;
+
+	if (!otx2_eth_dev_is_sec_capable(eth_dev))
+		return -EINVAL;
+
+	otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
+
+	msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+	if (msg == NULL)
+		return -ENOMEM;
+
+	/* Enable inline outbound processing on this CPT queue pair */
+	msg->dir = CPT_INLINE_OUTBOUND;
+	msg->enable = 1;
+	msg->slot = qp->id;
+
+	/* PF_FUNC of the NIX this queue pair gets associated with */
+	msg->nix_pf_func = otx2_eth_dev->pf_func;
+
+	otx2_mbox_msg_send(mbox, 0);
+	ret = otx2_mbox_process(mbox);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
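The mailbox helpers in this file all follow the same shape: reserve a typed request in the shared mailbox region, ring the doorbell, then wait for the PF to answer. When a caller needs the response payload and not just pass/fail, the same shape goes through otx2_mbox_get_rsp(). A minimal sketch, assuming a hypothetical request type my_req_msg with its generated otx2_mbox_alloc_msg_my_req() helper:

static int
example_mbox_roundtrip(struct otx2_mbox *mbox)
{
	struct my_req_msg *req;	/* hypothetical request type */
	struct msg_rsp *rsp;
	int ret;

	req = otx2_mbox_alloc_msg_my_req(mbox);	/* reserve a slot in mbox memory */
	if (req == NULL)
		return -ENOMEM;

	/* ... fill request fields here ... */

	otx2_mbox_msg_send(mbox, 0);	/* ring the PF doorbell */

	ret = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);	/* block for the reply */
	if (ret)
		return ret;	/* mailbox error or non-zero PF return code */

	/* ... consume response fields from rsp ... */
	return 0;
}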
--- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
#include <rte_cryptodev.h>
+#include "otx2_cryptodev_hw_access.h"
+
int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
				  uint16_t *nb_queues);
int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
			  uint64_t val);
+int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
+			    struct otx2_cpt_qp *qp, uint16_t port_id);
+
+int otx2_cpt_inline_init(const struct rte_cryptodev *dev);
+
#endif /* _OTX2_CRYPTODEV_MBOX_H_ */
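The header gains only setup entry points; this patch provides no explicit unbind, leaving teardown to regular queue pair release. If a detach were ever needed, the natural shape would be the same cpt_inline_ipsec_cfg_msg with enable cleared — a hypothetical sketch, not part of this patch:

/* Hypothetical: detach a queue pair from the inline outbound path */
static int
otx2_cpt_qp_ethdev_unbind(const struct rte_cryptodev *dev,
			  struct otx2_cpt_qp *qp)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct cpt_inline_ipsec_cfg_msg *msg;

	msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
	if (msg == NULL)
		return -ENOMEM;

	msg->dir = CPT_INLINE_OUTBOUND;
	msg->enable = 0;	/* clear instead of set */
	msg->slot = qp->id;

	otx2_mbox_msg_send(mbox, 0);
	return otx2_mbox_process(mbox) < 0 ? -EIO : 0;
}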
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
+#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_mbox.h"
+#include "otx2_sec_idev.h"
#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
@@ ... @@
	meta_info->sg_mlen = 0;
}

+static int
+otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+	static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
+	uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
+	int i;
+
+	/* Spread queue pairs across the security capable ethdev ports in a
+	 * round-robin fashion; port_offset starts at -1 so the first caller
+	 * lands on port 0.
+	 */
+	for (i = 0; i < nb_ethport; i++) {
+		port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
+		if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+			break;
+	}
+
+	/* No security capable ethdev found; leave this queue pair unbound */
+	if (i >= nb_ethport)
+		return 0;
+
+	return otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
+}
+
static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
uint8_t group)
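otx2_cpt_qp_inline_cfg() above deserves a note: a function-local atomic counter hands every new queue pair the next candidate port, so queue pairs spread across all security capable ethdevs without any extra per-device state, and concurrent queue pair setups cannot pick the same offset. A stand-alone sketch of the same pattern in plain C11 atomics (next_port() is hypothetical, not driver code):

#include <stdatomic.h>
#include <stdint.h>

/* Hand out 0, 1, 2, ... modulo nb_ports across concurrent callers.
 * Starting from -1 mirrors RTE_ATOMIC16_INIT(-1) above; since
 * atomic_fetch_add() returns the old value, add 1 to match the
 * add-then-read semantics of rte_atomic16_add_return().
 */
static uint16_t
next_port(uint16_t nb_ports)
{
	static atomic_int port_offset = -1;

	return (uint16_t)((atomic_fetch_add(&port_offset, 1) + 1) % nb_ports);
}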
@@ ... @@
	otx2_cpt_iq_disable(qp);

+	ret = otx2_cpt_qp_inline_cfg(dev, qp);
+	if (ret) {
+		CPT_LOG_ERR("Could not configure queue for inline IPsec");
+		goto mempool_destroy;
+	}
+
	ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
				 size_div40);

@@ ... @@ otx2_cpt_dev_config()
	if (ret) {
		goto queues_detach;
	}

+	ret = otx2_cpt_inline_init(dev);
+	if (ret) {
+		CPT_LOG_ERR("Could not enable inline IPsec");
+		goto intr_unregister;
+	}
+
	dev->enqueue_burst = otx2_cpt_enqueue_burst;
	dev->dequeue_burst = otx2_cpt_dequeue_burst;

	rte_mb();
	return 0;

+intr_unregister:
+	otx2_cpt_err_intr_unregister(dev);
queues_detach:
	otx2_cpt_queues_detach(dev);
	return ret;
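Taken together: otx2_cpt_inline_init() runs once from the device configure op (the hunk above that also sets the enqueue/dequeue burst functions), and otx2_cpt_qp_inline_cfg() runs per queue pair from otx2_cpt_qp_create(). A standard cryptodev bring-up therefore exercises both paths. A minimal application-side sketch, with error handling trimmed and the device id, descriptor count, and session mempools assumed to exist:

#include <rte_cryptodev.h>

static int
setup_cpt_dev(uint8_t dev_id, struct rte_mempool *sess_mp,
	      struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_config conf = {
		.socket_id = 0,
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 8192,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};
	int ret;

	/* Invokes the PMD configure op, which now also sets up inline IPsec */
	ret = rte_cryptodev_configure(dev_id, &conf);
	if (ret < 0)
		return ret;

	/* Creates the queue pair, which now gets bound to a sec-capable port */
	return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, 0);
}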