#include "rte_pmd_mlx5.h"
#include "mlx5_verbs.h"
#include "mlx5_nl.h"
+#include "mlx5_devx.h"
#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
goto error;
}
}
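+ /* Use DevX queue objects when both DevX and DV flow are enabled, Verbs otherwise. */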
+ if (config->devx && config->dv_flow_en)
+ priv->obj_ops = &devx_obj_ops;
+ else
+ priv->obj_ops = &ibv_obj_ops;
return eth_dev;
error:
if (priv) {
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
+#include <sys/queue.h>
#include "mlx5_autoconf.h"
#include <mlx5_common_mr.h>
#include <mlx5_rxtx.h>
#include <mlx5_verbs.h>
+#include <mlx5_utils.h>
+#include <mlx5_malloc.h>
+
/**
* Register mr. Given protection domain pointer, pointer to addr and length
* register the memory region.
return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
+/**
+ * Create a CQ Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param cqe_n
+ * Number of CQEs in CQ.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct ibv_cq *
+mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
+ struct mlx5_rxq_data *rxq_data,
+ unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
+{
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq_attr;
+
+ cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = rxq_obj->ibv_channel,
+ .comp_mask = 0,
+ };
+ cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+ .comp_mask = 0,
+ };
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+ cq_attr.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ cq_attr.mlx5.cqe_comp_res_format =
+ mlx5_rxq_mprq_enabled(rxq_data) ?
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+ cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+ /*
+ * For vectorized Rx, the CQE count must not be doubled, in order
+ * to keep cq_ci and rq_ci aligned.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+ cq_attr.ibv.cqe *= 2;
+ } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
+ DRV_LOG(DEBUG,
+ "Port %u Rx CQE compression is disabled for HW"
+ " timestamp.",
+ dev->data->port_id);
+ }
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+ if (priv->config.cqe_pad) {
+ cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
+ cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+ }
+#endif
+ return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
+ &cq_attr.ibv,
+ &cq_attr.mlx5));
+}
+
+/**
+ * Create a WQ Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ * @param wqe_n
+ * Number of WQEs in WQ.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct ibv_wq *
+mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
+ struct mlx5_rxq_data *rxq_data, uint16_t idx,
+ unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
+{
+ struct {
+ struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ struct mlx5dv_wq_init_attr mlx5;
+#endif
+ } wq_attr;
+
+ wq_attr.ibv = (struct ibv_wq_init_attr){
+ .wq_context = NULL, /* Could be useful in the future. */
+ .wq_type = IBV_WQT_RQ,
+ /* Max number of outstanding WRs. */
+ .max_wr = wqe_n >> rxq_data->sges_n,
+ /* Max number of scatter/gather elements in a WR. */
+ .max_sge = 1 << rxq_data->sges_n,
+ .pd = priv->sh->pd,
+ .cq = rxq_obj->ibv_cq,
+ .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
+ .create_flags = (rxq_data->vlan_strip ?
+ IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
+ };
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq_data->crc_present) {
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+ if (priv->config.hw_padding) {
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+#endif
+ }
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
+ .comp_mask = 0,
+ };
+ if (mlx5_rxq_mprq_enabled(rxq_data)) {
+ struct mlx5dv_striding_rq_init_attr *mprq_attr =
+ &wq_attr.mlx5.striding_rq_attrs;
+
+ wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+ .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+ .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+ };
+ }
+ rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+ &wq_attr.mlx5);
+#else
+ rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
+#endif
+ if (rxq_obj->wq) {
+ /*
+ * Make sure number of WRs*SGEs match expectations since a queue
+ * cannot allocate more than "desc" buffers.
+ */
+ if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ DRV_LOG(ERR,
+ "Port %u Rx queue %u requested %u*%u but got"
+ " %u*%u WRs*SGEs.",
+ dev->data->port_id, idx,
+ wqe_n >> rxq_data->sges_n,
+ (1 << rxq_data->sges_n),
+ wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
+ claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ rxq_obj->wq = NULL;
+ rte_errno = EINVAL;
+ }
+ }
+ return rxq_obj->wq;
+}
+
+/**
+ * Create the Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
+ struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_rwq rwq;
+ int ret = 0;
+ struct mlx5dv_obj obj;
+
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
+ priv->verbs_alloc_ctx.obj = rxq_ctrl;
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ if (rxq_ctrl->irq) {
+ tmpl->ibv_channel =
+ mlx5_glue->create_comp_channel(priv->sh->ctx);
+ if (!tmpl->ibv_channel) {
+ DRV_LOG(ERR, "Port %u: comp channel creation failure.",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
+ }
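+ /* CQ size: with MPRQ, allow for the worst case of one CQE per stride (wqe_n * 2^strd_num_n). */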
+ if (mlx5_rxq_mprq_enabled(rxq_data))
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
+ DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->sh->device_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+ dev->data->port_id, priv->sh->device_attr.max_sge);
+ /* Create CQ using Verbs API. */
+ tmpl->ibv_cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
+ if (!tmpl->ibv_cq) {
+ DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ obj.cq.in = tmpl->ibv_cq;
+ obj.cq.out = &cq_info;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
+ if (ret) {
+ rte_errno = ret;
+ goto error;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ DRV_LOG(ERR,
+ "Port %u wrong MLX5_CQE_SIZE environment "
+ "variable value: it should be set to %u.",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ /* Fill the rings. */
+ rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ rxq_data->cq_db = cq_info.dbrec;
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+ rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->cqn = cq_info.cqn;
+ /* Create WQ (RQ) using Verbs API. */
+ tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n, tmpl);
+ if (!tmpl->wq) {
+ DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ };
+ ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR,
+ "Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
+ dev->data->port_id, idx);
+ rte_errno = ret;
+ goto error;
+ }
+ obj.rwq.in = tmpl->wq;
+ obj.rwq.out = &rwq;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
+ if (ret) {
+ rte_errno = ret;
+ goto error;
+ }
+ rxq_data->wqes = rwq.buf;
+ rxq_data->rq_db = rwq.dbrec;
+ rxq_data->cq_arm_sn = 0;
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->cq_ci = 0;
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
+ return tmpl;
+error:
+ if (tmpl) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->wq)
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+ if (tmpl->ibv_cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
+ if (tmpl->ibv_channel)
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (tmpl->ibv_channel));
+ mlx5_free(tmpl);
+ rte_errno = ret; /* Restore rte_errno. */
+ }
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return NULL;
+}
+
+/**
+ * Release an Rx verbs queue object.
+ *
+ * @param rxq_obj
+ * Verbs Rx queue object.
+ */
+static void
+mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ MLX5_ASSERT(rxq_obj);
+ MLX5_ASSERT(rxq_obj->wq);
+ MLX5_ASSERT(rxq_obj->ibv_cq);
+ rxq_free_elts(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
+ if (rxq_obj->ibv_channel)
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_obj->ibv_channel));
+ LIST_REMOVE(rxq_obj, next);
+ mlx5_free(rxq_obj);
+}
+
struct mlx5_obj_ops ibv_obj_ops = {
.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
+ .rxq_obj_new = mlx5_rxq_ibv_obj_new,
+ .rxq_obj_release = mlx5_rxq_ibv_obj_release,
};
#ifndef RTE_PMD_MLX5_VERBS_H_
#define RTE_PMD_MLX5_VERBS_H_
+#include "mlx5.h"
+
struct mlx5_verbs_ops {
mlx5_reg_mr_t reg_mr;
mlx5_dereg_mr_t dereg_mr;
/* Verbs ops struct */
extern const struct mlx5_verbs_ops mlx5_verbs_ops;
+extern struct mlx5_obj_ops ibv_obj_ops;
+
#endif /* RTE_PMD_MLX5_VERBS_H_ */
'mlx5_txpp.c',
'mlx5_vlan.c',
'mlx5_utils.c',
+ 'mlx5_devx.c',
)
if (dpdk_conf.has('RTE_ARCH_X86_64')
or dpdk_conf.has('RTE_ARCH_ARM64')
#define MLX5_PROC_PRIV(port_id) \
((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
+enum mlx5_rxq_obj_type {
+ MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
+ MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
+ MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN,
+ /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */
+};
+
+/* Verbs/DevX Rx queue elements. */
+struct mlx5_rxq_obj {
+ LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
+ struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
+ enum mlx5_rxq_obj_type type;
+ int fd; /* File descriptor for event channel */
+ RTE_STD_C11
+ union {
+ struct {
+ void *wq; /* Work Queue. */
+ void *ibv_cq; /* Completion Queue. */
+ void *ibv_channel;
+ };
+ struct {
+ struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
+ struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
+ void *devx_channel;
+ };
+ };
+};
+
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
+ struct mlx5_rxq_obj *(*rxq_obj_new)(struct rte_eth_dev *dev,
+ uint16_t idx);
+ void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
};
struct mlx5_priv {
return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
+/**
+ * Release the resources allocated for an RQ DevX object.
+ *
+ * @param rxq_ctrl
+ * DevX Rx queue object.
+ */
+static void
+rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (rxq_ctrl->rxq.wqes) {
+ mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+ rxq_ctrl->rxq.wqes = NULL;
+ }
+ if (rxq_ctrl->wq_umem) {
+ mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+ rxq_ctrl->wq_umem = NULL;
+ }
+}
+
+/**
+ * Release the resources allocated for the Rx CQ DevX object.
+ *
+ * @param rxq_ctrl
+ * DevX Rx queue object.
+ */
+static void
+rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (rxq_ctrl->rxq.cqes) {
+ rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
+ rxq_ctrl->rxq.cqes = NULL;
+ }
+ if (rxq_ctrl->cq_umem) {
+ mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
+ rxq_ctrl->cq_umem = NULL;
+ }
+}
+
+/**
+ * Release an Rx hairpin related resources.
+ *
+ * @param rxq_obj
+ * Hairpin Rx queue object.
+ */
+static void
+mlx5_rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
+
+ MLX5_ASSERT(rxq_obj);
+ rq_attr.state = MLX5_RQC_STATE_RST;
+ rq_attr.rq_state = MLX5_RQC_STATE_RDY;
+ mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+}
+
+/**
+ * Release an Rx DevX queue object.
+ *
+ * @param rxq_obj
+ * DevX Rx queue object.
+ */
+static void
+mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;
+
+ MLX5_ASSERT(rxq_obj);
+ MLX5_ASSERT(rxq_obj->rq);
+ if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
+ mlx5_rxq_obj_hairpin_release(rxq_obj);
+ } else {
+ MLX5_ASSERT(rxq_obj->devx_cq);
+ rxq_free_elts(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
+ claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+ rxq_obj->rxq_ctrl->rq_dbr_umem_id,
+ rxq_obj->rxq_ctrl->rq_dbr_offset));
+ claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+ rxq_obj->rxq_ctrl->cq_dbr_umem_id,
+ rxq_obj->rxq_ctrl->cq_dbr_offset));
+ if (rxq_obj->devx_channel)
+ mlx5_glue->devx_destroy_event_channel
+ (rxq_obj->devx_channel);
+ rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
+ rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
+ }
+ LIST_REMOVE(rxq_obj, next);
+ mlx5_free(rxq_obj);
+}
+
+/**
+ * Fill common fields of create RQ attributes structure.
+ *
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param cqn
+ * CQ number to use with this RQ.
+ * @param rq_attr
+ * RQ attributes structure to fill.
+ */
+static void
+mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
+ struct mlx5_devx_create_rq_attr *rq_attr)
+{
+ rq_attr->state = MLX5_RQC_STATE_RST;
+ rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
+ rq_attr->cqn = cqn;
+ rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
+}
+
+/**
+ * Fill common fields of DevX WQ attributes structure.
+ *
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_ctrl
+ * Pointer to Rx queue control structure.
+ * @param wq_attr
+ * WQ attributes structure to fill.
+ */
+static void
+mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
+ struct mlx5_devx_wq_attr *wq_attr)
+{
+ wq_attr->end_padding_mode = priv->config.cqe_pad ?
+ MLX5_WQ_END_PAD_MODE_ALIGN :
+ MLX5_WQ_END_PAD_MODE_NONE;
+ wq_attr->pd = priv->sh->pdn;
+ wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
+ wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
+ wq_attr->dbr_umem_valid = 1;
+ wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
+ wq_attr->wq_umem_valid = 1;
+}
+
+/**
+ * Create an RQ object using DevX.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ * @param cqn
+ * CQ number to use with this RQ.
+ *
+ * @return
+ * The DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_create_rq_attr rq_attr = { 0 };
+ uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
+ uint32_t wq_size = 0;
+ uint32_t wqe_size = 0;
+ uint32_t log_wqe_size = 0;
+ void *buf = NULL;
+ struct mlx5_devx_obj *rq;
+
+ /* Fill RQ attributes. */
+ rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
+ rq_attr.flush_in_error_en = 1;
+ mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
+ /* Fill WQ attributes for this RQ. */
+ if (mlx5_rxq_mprq_enabled(rxq_data)) {
+ rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
+ /*
+ * Number of strides in each WQE:
+ * 512*2^single_wqe_log_num_of_strides.
+ */
+ rq_attr.wq_attr.single_wqe_log_num_of_strides =
+ rxq_data->strd_num_n -
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
+ rq_attr.wq_attr.single_stride_log_num_of_bytes =
+ rxq_data->strd_sz_n -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ wqe_size = sizeof(struct mlx5_wqe_mprq);
+ } else {
+ rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+ wqe_size = sizeof(struct mlx5_wqe_data_seg);
+ }
+ log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
+ rq_attr.wq_attr.log_wq_stride = log_wqe_size;
+ rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
+ /* Calculate and allocate WQ memory space. */
+ wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
+ wq_size = wqe_n * wqe_size;
+ size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
+ alignment, rxq_ctrl->socket);
+ if (!buf)
+ return NULL;
+ rxq_data->wqes = buf;
+ rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+ buf, wq_size, 0);
+ if (!rxq_ctrl->wq_umem) {
+ mlx5_free(buf);
+ return NULL;
+ }
+ mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
+ rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
+ if (!rq)
+ rxq_release_devx_rq_resources(rxq_ctrl);
+ return rq;
+}
+
+/**
+ * Create a DevX CQ object for an Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param cqe_n
+ * Number of CQEs in CQ.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
+ struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_devx_obj *cq_obj = 0;
+ struct mlx5_devx_cq_attr cq_attr = { 0 };
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ size_t page_size = rte_mem_page_size();
+ uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+ uint32_t eqn = 0;
+ void *buf = NULL;
+ uint16_t event_nums[1] = {0};
+ uint32_t log_cqe_n;
+ uint32_t cq_size;
+ int ret = 0;
+
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get page_size.");
+ goto error;
+ }
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+ !rxq_data->lro) {
+ cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ cq_attr.mini_cqe_res_format =
+ mlx5_rxq_mprq_enabled(rxq_data) ?
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+ cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+ /*
+ * For vectorized Rx, the CQE count must not be doubled, in order
+ * to keep cq_ci and rq_ci aligned.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+ cqe_n *= 2;
+ } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
+ DRV_LOG(DEBUG,
+ "Port %u Rx CQE compression is disabled for HW"
+ " timestamp.",
+ dev->data->port_id);
+ } else if (priv->config.cqe_comp && rxq_data->lro) {
+ DRV_LOG(DEBUG,
+ "Port %u Rx CQE compression is disabled for LRO.",
+ dev->data->port_id);
+ }
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+ if (priv->config.cqe_pad)
+ cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+#endif
+ log_cqe_n = log2above(cqe_n);
+ cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
+ /* Query the EQN for this core. */
+ if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
+ DRV_LOG(ERR, "Failed to query EQN for CQ.");
+ goto error;
+ }
+ cq_attr.eqn = eqn;
+ buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
+ rxq_ctrl->socket);
+ if (!buf) {
+ DRV_LOG(ERR, "Failed to allocate memory for CQ.");
+ goto error;
+ }
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
+ rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+ cq_size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (!rxq_ctrl->cq_umem) {
+ DRV_LOG(ERR, "Failed to register umem for CQ.");
+ goto error;
+ }
+ cq_attr.uar_page_id =
+ mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
+ cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
+ cq_attr.q_umem_valid = 1;
+ cq_attr.log_cq_size = log_cqe_n;
+ cq_attr.log_page_size = rte_log2_u32(page_size);
+ cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
+ cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
+ cq_attr.db_umem_valid = 1;
+ cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
+ if (!cq_obj)
+ goto error;
+ rxq_data->cqe_n = log_cqe_n;
+ rxq_data->cqn = cq_obj->id;
+ if (rxq_obj->devx_channel) {
+ ret = mlx5_glue->devx_subscribe_devx_event
+ (rxq_obj->devx_channel,
+ cq_obj->obj,
+ sizeof(event_nums),
+ event_nums,
+ (uint64_t)(uintptr_t)cq_obj);
+ if (ret) {
+ DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
+ rte_errno = errno;
+ goto error;
+ }
+ }
+ /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
+ memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
+ return cq_obj;
+error:
+ if (cq_obj)
+ mlx5_devx_cmd_destroy(cq_obj);
+ rxq_release_devx_cq_resources(rxq_ctrl);
+ return NULL;
+}
+
+/**
+ * Create the Rx hairpin queue object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_create_rq_attr attr = { 0 };
+ struct mlx5_rxq_obj *tmpl = NULL;
+ uint32_t max_wq_data;
+
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ attr.hairpin = 1;
+ max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ /* Jumbo frames > 9KB should be supported, and more packets. */
+ if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
+ if (priv->config.log_hp_size > max_wq_data) {
+ DRV_LOG(ERR, "Total data size %u power of 2 is "
+ "too large for hairpin.",
+ priv->config.log_hp_size);
+ mlx5_free(tmpl);
+ rte_errno = ERANGE;
+ return NULL;
+ }
+ attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
+ } else {
+ attr.wq_attr.log_hairpin_data_sz =
+ (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
+ max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
+ }
+ /* Set the packets number to the maximum value for performance. */
+ attr.wq_attr.log_hairpin_num_packets =
+ attr.wq_attr.log_hairpin_data_sz -
+ MLX5_HAIRPIN_QUEUE_STRIDE;
+ tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+ rxq_ctrl->socket);
+ if (!tmpl->rq) {
+ DRV_LOG(ERR,
+ "Port %u Rx hairpin queue %u can't create rq object.",
+ dev->data->port_id, idx);
+ mlx5_free(tmpl);
+ rte_errno = errno;
+ return NULL;
+ }
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
+ return tmpl;
+}
+
+/**
+ * Create the Rx queue DevX object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
+ struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
+ struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
+ struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
+ int64_t dbr_offset;
+ int ret = 0;
+
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return mlx5_rxq_obj_hairpin_new(dev, idx);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ if (rxq_ctrl->irq) {
+ int devx_ev_flag =
+ MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
+
+ tmpl->devx_channel = mlx5_glue->devx_create_event_channel
+ (priv->sh->ctx,
+ devx_ev_flag);
+ if (!tmpl->devx_channel) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create event channel %d.",
+ rte_errno);
+ goto error;
+ }
+ tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
+ }
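+ /* CQ size: with MPRQ, allow for the worst case of one CQE per stride (wqe_n * 2^strd_num_n). */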
+ if (mlx5_rxq_mprq_enabled(rxq_data))
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
+ DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->sh->device_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+ dev->data->port_id, priv->sh->device_attr.max_sge);
+ /* Allocate CQ door-bell. */
+ dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &cq_dbr_page);
+ if (dbr_offset < 0) {
+ DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
+ goto error;
+ }
+ rxq_ctrl->cq_dbr_offset = dbr_offset;
+ rxq_ctrl->cq_dbr_umem_id = mlx5_os_get_umem_id(cq_dbr_page->umem);
+ rxq_data->cq_db = (uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
+ (uintptr_t)rxq_ctrl->cq_dbr_offset);
+ rxq_data->cq_uar =
+ mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
+ /* Create CQ using DevX API. */
+ tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
+ if (!tmpl->devx_cq) {
+ DRV_LOG(ERR, "Failed to create CQ.");
+ goto error;
+ }
+ /* Allocate RQ door-bell. */
+ dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &rq_dbr_page);
+ if (dbr_offset < 0) {
+ DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
+ goto error;
+ }
+ rxq_ctrl->rq_dbr_offset = dbr_offset;
+ rxq_ctrl->rq_dbr_umem_id = mlx5_os_get_umem_id(rq_dbr_page->umem);
+ rxq_data->rq_db = (uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
+ (uintptr_t)rxq_ctrl->rq_dbr_offset);
+ /* Create RQ using DevX API. */
+ tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
+ if (!tmpl->rq) {
+ DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ rq_attr.rq_state = MLX5_RQC_STATE_RST;
+ rq_attr.state = MLX5_RQC_STATE_RDY;
+ ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
+ if (ret)
+ goto error;
+ rxq_data->cq_arm_sn = 0;
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->cq_ci = 0;
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq_ctrl->wqn = tmpl->rq->id;
+ return tmpl;
+error:
+ if (tmpl) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->rq)
+ claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
+ if (tmpl->devx_cq)
+ claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
+ if (tmpl->devx_channel)
+ mlx5_glue->devx_destroy_event_channel
+ (tmpl->devx_channel);
+ mlx5_free(tmpl);
+ rte_errno = ret; /* Restore rte_errno. */
+ }
+ if (rq_dbr_page)
+ claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+ rxq_ctrl->rq_dbr_umem_id,
+ rxq_ctrl->rq_dbr_offset));
+ if (cq_dbr_page)
+ claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+ rxq_ctrl->cq_dbr_umem_id,
+ rxq_ctrl->cq_dbr_offset));
+ rxq_release_devx_rq_resources(rxq_ctrl);
+ rxq_release_devx_cq_resources(rxq_ctrl);
+ return NULL;
+}
+
struct mlx5_obj_ops devx_obj_ops = {
.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
+ .rxq_obj_new = mlx5_rxq_devx_obj_new,
+ .rxq_obj_release = mlx5_rxq_devx_obj_release,
};
* @param rxq_ctrl
* Pointer to RX queue structure.
*/
-static void
+void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
}
-/**
- * Release the resources allocated for an RQ DevX object.
- *
- * @param rxq_ctrl
- * DevX Rx queue object.
- */
-static void
-rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- if (rxq_ctrl->rxq.wqes) {
- mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
- rxq_ctrl->rxq.wqes = NULL;
- }
- if (rxq_ctrl->wq_umem) {
- mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
- rxq_ctrl->wq_umem = NULL;
- }
-}
-
-/**
- * Release the resources allocated for the Rx CQ DevX object.
- *
- * @param rxq_ctrl
- * DevX Rx queue object.
- */
-static void
-rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- if (rxq_ctrl->rxq.cqes) {
- rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
- rxq_ctrl->rxq.cqes = NULL;
- }
- if (rxq_ctrl->cq_umem) {
- mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
- rxq_ctrl->cq_umem = NULL;
- }
-}
-
-/**
- * Release an Rx hairpin related resources.
- *
- * @param rxq_obj
- * Hairpin Rx queue object.
- */
-static void
-rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
-{
- struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
-
- MLX5_ASSERT(rxq_obj);
- rq_attr.state = MLX5_RQC_STATE_RST;
- rq_attr.rq_state = MLX5_RQC_STATE_RDY;
- mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
- claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
-}
-
-/**
- * Release an Rx verbs/DevX queue object.
- *
- * @param rxq_obj
- * Verbs/DevX Rx queue object.
- */
-static void
-mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
-{
- struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;
- struct mlx5_rxq_ctrl *rxq_ctrl = rxq_obj->rxq_ctrl;
-
- MLX5_ASSERT(rxq_obj);
- switch (rxq_obj->type) {
- case MLX5_RXQ_OBJ_TYPE_IBV:
- MLX5_ASSERT(rxq_obj->wq);
- MLX5_ASSERT(rxq_obj->ibv_cq);
- rxq_free_elts(rxq_ctrl);
- claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
- claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
- if (rxq_obj->ibv_channel)
- claim_zero(mlx5_glue->destroy_comp_channel
- (rxq_obj->ibv_channel));
- break;
- case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
- MLX5_ASSERT(rxq_obj->rq);
- MLX5_ASSERT(rxq_obj->devx_cq);
- rxq_free_elts(rxq_ctrl);
- claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
- claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
- claim_zero(mlx5_release_dbr(&priv->dbrpgs,
- rxq_ctrl->rq_dbr_umem_id,
- rxq_ctrl->rq_dbr_offset));
- claim_zero(mlx5_release_dbr(&priv->dbrpgs,
- rxq_ctrl->cq_dbr_umem_id,
- rxq_ctrl->cq_dbr_offset));
- if (rxq_obj->devx_channel)
- mlx5_glue->devx_destroy_event_channel
- (rxq_obj->devx_channel);
- rxq_release_devx_rq_resources(rxq_ctrl);
- rxq_release_devx_cq_resources(rxq_ctrl);
- break;
- case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
- MLX5_ASSERT(rxq_obj->rq);
- rxq_obj_hairpin_release(rxq_obj);
- break;
- }
- LIST_REMOVE(rxq_obj, next);
- mlx5_free(rxq_obj);
-}
-
/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
return -rte_errno;
}
-/**
- * Create a CQ Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param priv
- * Pointer to device private data.
- * @param rxq_data
- * Pointer to Rx queue data.
- * @param cqe_n
- * Number of CQEs in CQ.
- * @param rxq_obj
- * Pointer to Rx queue object data.
- *
- * @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
- */
-static struct ibv_cq *
-mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
- struct mlx5_rxq_data *rxq_data,
- unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
-{
- struct {
- struct ibv_cq_init_attr_ex ibv;
- struct mlx5dv_cq_init_attr mlx5;
- } cq_attr;
-
- cq_attr.ibv = (struct ibv_cq_init_attr_ex){
- .cqe = cqe_n,
- .channel = rxq_obj->ibv_channel,
- .comp_mask = 0,
- };
- cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
- .comp_mask = 0,
- };
- if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
- cq_attr.mlx5.comp_mask |=
- MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- cq_attr.mlx5.cqe_comp_res_format =
- mlx5_rxq_mprq_enabled(rxq_data) ?
- MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
- MLX5DV_CQE_RES_FORMAT_HASH;
-#else
- cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
-#endif
- /*
- * For vectorized Rx, it must not be doubled in order to
- * make cq_ci and rq_ci aligned.
- */
- if (mlx5_rxq_check_vec_support(rxq_data) < 0)
- cq_attr.ibv.cqe *= 2;
- } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
- DRV_LOG(DEBUG,
- "port %u Rx CQE compression is disabled for HW"
- " timestamp",
- dev->data->port_id);
- }
-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (priv->config.cqe_pad) {
- cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
- cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
- }
-#endif
- return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
- &cq_attr.ibv,
- &cq_attr.mlx5));
-}
-
-/**
- * Create a WQ Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param priv
- * Pointer to device private data.
- * @param rxq_data
- * Pointer to Rx queue data.
- * @param idx
- * Queue index in DPDK Rx queue array
- * @param wqe_n
- * Number of WQEs in WQ.
- * @param rxq_obj
- * Pointer to Rx queue object data.
- *
- * @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
- */
-static struct ibv_wq *
-mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
- struct mlx5_rxq_data *rxq_data, uint16_t idx,
- unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
-{
- struct {
- struct ibv_wq_init_attr ibv;
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- struct mlx5dv_wq_init_attr mlx5;
-#endif
- } wq_attr;
-
- wq_attr.ibv = (struct ibv_wq_init_attr){
- .wq_context = NULL, /* Could be useful in the future. */
- .wq_type = IBV_WQT_RQ,
- /* Max number of outstanding WRs. */
- .max_wr = wqe_n >> rxq_data->sges_n,
- /* Max number of scatter/gather elements in a WR. */
- .max_sge = 1 << rxq_data->sges_n,
- .pd = priv->sh->pd,
- .cq = rxq_obj->ibv_cq,
- .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
- .create_flags = (rxq_data->vlan_strip ?
- IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
- };
- /* By default, FCS (CRC) is stripped by hardware. */
- if (rxq_data->crc_present) {
- wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
- wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
- }
- if (priv->config.hw_padding) {
-#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
- wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
- wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
-#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
- wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
- wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
-#endif
- }
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
- .comp_mask = 0,
- };
- if (mlx5_rxq_mprq_enabled(rxq_data)) {
- struct mlx5dv_striding_rq_init_attr *mprq_attr =
- &wq_attr.mlx5.striding_rq_attrs;
-
- wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
- *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
- .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
- .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
- .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
- };
- }
- rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
- &wq_attr.mlx5);
-#else
- rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
-#endif
- if (rxq_obj->wq) {
- /*
- * Make sure number of WRs*SGEs match expectations since a queue
- * cannot allocate more than "desc" buffers.
- */
- if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
- wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
- DRV_LOG(ERR,
- "port %u Rx queue %u requested %u*%u but got"
- " %u*%u WRs*SGEs",
- dev->data->port_id, idx,
- wqe_n >> rxq_data->sges_n,
- (1 << rxq_data->sges_n),
- wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
- claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
- rxq_obj->wq = NULL;
- rte_errno = EINVAL;
- }
- }
- return rxq_obj->wq;
-}
-
-/**
- * Fill common fields of create RQ attributes structure.
- *
- * @param rxq_data
- * Pointer to Rx queue data.
- * @param cqn
- * CQ number to use with this RQ.
- * @param rq_attr
- * RQ attributes structure to fill..
- */
-static void
-mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
- struct mlx5_devx_create_rq_attr *rq_attr)
-{
- rq_attr->state = MLX5_RQC_STATE_RST;
- rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
- rq_attr->cqn = cqn;
- rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
-}
-
-/**
- * Fill common fields of DevX WQ attributes structure.
- *
- * @param priv
- * Pointer to device private data.
- * @param rxq_ctrl
- * Pointer to Rx queue control structure.
- * @param wq_attr
- * WQ attributes structure to fill..
- */
-static void
-mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
- struct mlx5_devx_wq_attr *wq_attr)
-{
- wq_attr->end_padding_mode = priv->config.cqe_pad ?
- MLX5_WQ_END_PAD_MODE_ALIGN :
- MLX5_WQ_END_PAD_MODE_NONE;
- wq_attr->pd = priv->sh->pdn;
- wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
- wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
- wq_attr->dbr_umem_valid = 1;
- wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
- wq_attr->wq_umem_valid = 1;
-}
-
-/**
- * Create a RQ object using DevX.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array
- * @param cqn
- * CQ number to use with this RQ.
- *
- * @return
- * The DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_devx_obj *
-mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- struct mlx5_devx_create_rq_attr rq_attr = { 0 };
- uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
- uint32_t wq_size = 0;
- uint32_t wqe_size = 0;
- uint32_t log_wqe_size = 0;
- void *buf = NULL;
- struct mlx5_devx_obj *rq;
-
- /* Fill RQ attributes. */
- rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
- rq_attr.flush_in_error_en = 1;
- mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
- /* Fill WQ attributes for this RQ. */
- if (mlx5_rxq_mprq_enabled(rxq_data)) {
- rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
- /*
- * Number of strides in each WQE:
- * 512*2^single_wqe_log_num_of_strides.
- */
- rq_attr.wq_attr.single_wqe_log_num_of_strides =
- rxq_data->strd_num_n -
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
- /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
- rq_attr.wq_attr.single_stride_log_num_of_bytes =
- rxq_data->strd_sz_n -
- MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
- wqe_size = sizeof(struct mlx5_wqe_mprq);
- } else {
- rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
- wqe_size = sizeof(struct mlx5_wqe_data_seg);
- }
- log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
- rq_attr.wq_attr.log_wq_stride = log_wqe_size;
- rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
- /* Calculate and allocate WQ memory space. */
- wqe_size = 1 << log_wqe_size; /* round up power of two.*/
- wq_size = wqe_n * wqe_size;
- size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
- if (alignment == (size_t)-1) {
- DRV_LOG(ERR, "Failed to get mem page size");
- rte_errno = ENOMEM;
- return NULL;
- }
- buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
- alignment, rxq_ctrl->socket);
- if (!buf)
- return NULL;
- rxq_data->wqes = buf;
- rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
- buf, wq_size, 0);
- if (!rxq_ctrl->wq_umem) {
- mlx5_free(buf);
- return NULL;
- }
- mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
- rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
- if (!rq)
- rxq_release_devx_rq_resources(rxq_ctrl);
- return rq;
-}
-
-/**
- * Create a DevX CQ object for an Rx queue.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param cqe_n
- * Number of CQEs in CQ.
- * @param idx
- * Queue index in DPDK Rx queue array
- * @param rxq_obj
- * Pointer to Rx queue object data.
- *
- * @return
- * The DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_devx_obj *
-mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
- struct mlx5_rxq_obj *rxq_obj)
-{
- struct mlx5_devx_obj *cq_obj = 0;
- struct mlx5_devx_cq_attr cq_attr = { 0 };
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- size_t page_size = rte_mem_page_size();
- uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
- uint32_t eqn = 0;
- void *buf = NULL;
- uint16_t event_nums[1] = {0};
- uint32_t log_cqe_n;
- uint32_t cq_size;
- int ret = 0;
-
- if (page_size == (size_t)-1) {
- DRV_LOG(ERR, "Failed to get page_size.");
- goto error;
- }
- if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
- !rxq_data->lro) {
- cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- cq_attr.mini_cqe_res_format =
- mlx5_rxq_mprq_enabled(rxq_data) ?
- MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
- MLX5DV_CQE_RES_FORMAT_HASH;
-#else
- cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
-#endif
- /*
- * For vectorized Rx, it must not be doubled in order to
- * make cq_ci and rq_ci aligned.
- */
- if (mlx5_rxq_check_vec_support(rxq_data) < 0)
- cqe_n *= 2;
- } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
- DRV_LOG(DEBUG,
- "port %u Rx CQE compression is disabled for HW"
- " timestamp",
- dev->data->port_id);
- } else if (priv->config.cqe_comp && rxq_data->lro) {
- DRV_LOG(DEBUG,
- "port %u Rx CQE compression is disabled for LRO",
- dev->data->port_id);
- }
-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (priv->config.cqe_pad)
- cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
-#endif
- log_cqe_n = log2above(cqe_n);
- cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
- /* Query the EQN for this core. */
- if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
- DRV_LOG(ERR, "Failed to query EQN for CQ.");
- goto error;
- }
- cq_attr.eqn = eqn;
- buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
- rxq_ctrl->socket);
- if (!buf) {
- DRV_LOG(ERR, "Failed to allocate memory for CQ.");
- goto error;
- }
- rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
- rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
- cq_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!rxq_ctrl->cq_umem) {
- DRV_LOG(ERR, "Failed to register umem for CQ.");
- goto error;
- }
- cq_attr.uar_page_id =
- mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
- cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
- cq_attr.q_umem_valid = 1;
- cq_attr.log_cq_size = log_cqe_n;
- cq_attr.log_page_size = rte_log2_u32(page_size);
- cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
- cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
- cq_attr.db_umem_valid = 1;
- cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
- if (!cq_obj)
- goto error;
- rxq_data->cqe_n = log_cqe_n;
- rxq_data->cqn = cq_obj->id;
- if (rxq_obj->devx_channel) {
- ret = mlx5_glue->devx_subscribe_devx_event
- (rxq_obj->devx_channel,
- cq_obj->obj,
- sizeof(event_nums),
- event_nums,
- (uint64_t)(uintptr_t)cq_obj);
- if (ret) {
- DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
- rte_errno = errno;
- goto error;
- }
- }
- /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
- memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
- return cq_obj;
-error:
- if (cq_obj)
- mlx5_devx_cmd_destroy(cq_obj);
- rxq_release_devx_cq_resources(rxq_ctrl);
- return NULL;
-}
-
-/**
- * Create the Rx hairpin queue object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array
- *
- * @return
- * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_rxq_obj *
-mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- struct mlx5_devx_create_rq_attr attr = { 0 };
- struct mlx5_rxq_obj *tmpl = NULL;
- uint32_t max_wq_data;
-
- MLX5_ASSERT(rxq_data);
- MLX5_ASSERT(!rxq_ctrl->obj);
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
- dev->data->port_id, rxq_data->idx);
- rte_errno = ENOMEM;
- return NULL;
- }
- tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
- tmpl->rxq_ctrl = rxq_ctrl;
- attr.hairpin = 1;
- max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
- /* Jumbo frames > 9KB should be supported, and more packets. */
- if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
- if (priv->config.log_hp_size > max_wq_data) {
- DRV_LOG(ERR, "total data size %u power of 2 is "
- "too large for hairpin",
- priv->config.log_hp_size);
- mlx5_free(tmpl);
- rte_errno = ERANGE;
- return NULL;
- }
- attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
- } else {
- attr.wq_attr.log_hairpin_data_sz =
- (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
- max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
- }
- /* Set the packets number to the maximum value for performance. */
- attr.wq_attr.log_hairpin_num_packets =
- attr.wq_attr.log_hairpin_data_sz -
- MLX5_HAIRPIN_QUEUE_STRIDE;
- tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
- rxq_ctrl->socket);
- if (!tmpl->rq) {
- DRV_LOG(ERR,
- "port %u Rx hairpin queue %u can't create rq object",
- dev->data->port_id, idx);
- mlx5_free(tmpl);
- rte_errno = errno;
- return NULL;
- }
- DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
- dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
- return tmpl;
-}
-
-/**
- * Create the Rx queue Verbs/DevX object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array
- * @param type
- * Type of Rx queue object to create.
- *
- * @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
- */
-struct mlx5_rxq_obj *
-mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
- enum mlx5_rxq_obj_type type)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- struct ibv_wq_attr mod;
- unsigned int cqe_n;
- unsigned int wqe_n = 1 << rxq_data->elts_n;
- struct mlx5_rxq_obj *tmpl = NULL;
- struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
- struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
- struct mlx5dv_cq cq_info;
- struct mlx5dv_rwq rwq;
- int ret = 0;
- struct mlx5dv_obj obj;
-
- MLX5_ASSERT(rxq_data);
- MLX5_ASSERT(!rxq_ctrl->obj);
- if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
- return mlx5_rxq_obj_hairpin_new(dev, idx);
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
- dev->data->port_id, rxq_data->idx);
- rte_errno = ENOMEM;
- goto error;
- }
- tmpl->type = type;
- tmpl->rxq_ctrl = rxq_ctrl;
- if (rxq_ctrl->irq) {
- if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
- tmpl->ibv_channel =
- mlx5_glue->create_comp_channel(priv->sh->ctx);
- if (!tmpl->ibv_channel) {
- DRV_LOG(ERR, "port %u: comp channel creation "
- "failure", dev->data->port_id);
- rte_errno = ENOMEM;
- goto error;
- }
- tmpl->fd = ((struct ibv_comp_channel *)
- (tmpl->ibv_channel))->fd;
- } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
- int devx_ev_flag =
- MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
-
- tmpl->devx_channel =
- mlx5_glue->devx_create_event_channel
- (priv->sh->ctx,
- devx_ev_flag);
- if (!tmpl->devx_channel) {
- rte_errno = errno;
- DRV_LOG(ERR,
- "Failed to create event channel %d.",
- rte_errno);
- goto error;
- }
- tmpl->fd =
- mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
- }
- }
- if (mlx5_rxq_mprq_enabled(rxq_data))
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
- else
- cqe_n = wqe_n - 1;
- DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->sh->device_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
- dev->data->port_id, priv->sh->device_attr.max_sge);
- if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
- priv->verbs_alloc_ctx.obj = rxq_ctrl;
- /* Create CQ using Verbs API. */
- tmpl->ibv_cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n,
- tmpl);
- if (!tmpl->ibv_cq) {
- DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
- obj.cq.in = tmpl->ibv_cq;
- obj.cq.out = &cq_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
- if (ret) {
- rte_errno = ret;
- goto error;
- }
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- DRV_LOG(ERR,
- "port %u wrong MLX5_CQE_SIZE environment "
- "variable value: it should be set to %u",
- dev->data->port_id, RTE_CACHE_LINE_SIZE);
- rte_errno = EINVAL;
- goto error;
- }
- /* Fill the rings. */
- rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
- rxq_data->cq_db = cq_info.dbrec;
- rxq_data->cqes =
- (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
- rxq_data->cq_uar = cq_info.cq_uar;
- rxq_data->cqn = cq_info.cqn;
- /* Create WQ (RQ) using Verbs API. */
- tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
- tmpl);
- if (!tmpl->wq) {
- DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
- /* Change queue state to ready. */
- mod = (struct ibv_wq_attr){
- .attr_mask = IBV_WQ_ATTR_STATE,
- .wq_state = IBV_WQS_RDY,
- };
- ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
- if (ret) {
- DRV_LOG(ERR,
- "port %u Rx queue %u WQ state to IBV_WQS_RDY"
- " failed", dev->data->port_id, idx);
- rte_errno = ret;
- goto error;
- }
- obj.rwq.in = tmpl->wq;
- obj.rwq.out = &rwq;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
- if (ret) {
- rte_errno = ret;
- goto error;
- }
- rxq_data->wqes = rwq.buf;
- rxq_data->rq_db = rwq.dbrec;
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
- struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- int64_t dbr_offset;
-
- /* Allocate CQ door-bell. */
- dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
- &cq_dbr_page);
- if (dbr_offset < 0) {
- DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
- goto error;
- }
- rxq_ctrl->cq_dbr_offset = dbr_offset;
- rxq_ctrl->cq_dbr_umem_id =
- mlx5_os_get_umem_id(cq_dbr_page->umem);
- rxq_data->cq_db =
- (uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
- (uintptr_t)rxq_ctrl->cq_dbr_offset);
- rxq_data->cq_uar =
- mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
- /* Create CQ using DevX API. */
- tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
- if (!tmpl->devx_cq) {
- DRV_LOG(ERR, "Failed to create CQ.");
- goto error;
- }
- /* Allocate RQ door-bell. */
- dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
- &rq_dbr_page);
- if (dbr_offset < 0) {
- DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
- goto error;
- }
- rxq_ctrl->rq_dbr_offset = dbr_offset;
- rxq_ctrl->rq_dbr_umem_id =
- mlx5_os_get_umem_id(rq_dbr_page->umem);
- rxq_data->rq_db =
- (uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
- (uintptr_t)rxq_ctrl->rq_dbr_offset);
- /* Create RQ using DevX API. */
- tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
- if (!tmpl->rq) {
- DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
- /* Change queue state to ready. */
- rq_attr.rq_state = MLX5_RQC_STATE_RST;
- rq_attr.state = MLX5_RQC_STATE_RDY;
- ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
- if (ret)
- goto error;
- }
- rxq_data->cq_arm_sn = 0;
- mlx5_rxq_initialize(rxq_data);
- rxq_data->cq_ci = 0;
- DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
- dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
- return tmpl;
-error:
- if (tmpl) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
- if (tmpl->wq)
- claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
- if (tmpl->ibv_cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
- if (tmpl->ibv_channel)
- claim_zero(mlx5_glue->destroy_comp_channel
- (tmpl->ibv_channel));
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
- if (tmpl->rq)
- claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
- if (tmpl->devx_cq)
- claim_zero(mlx5_devx_cmd_destroy
- (tmpl->devx_cq));
- if (tmpl->devx_channel)
- mlx5_glue->devx_destroy_event_channel
- (tmpl->devx_channel);
- if (rq_dbr_page)
- claim_zero(mlx5_release_dbr
- (&priv->dbrpgs,
- rxq_ctrl->rq_dbr_umem_id,
- rxq_ctrl->rq_dbr_offset));
- if (cq_dbr_page)
- claim_zero(mlx5_release_dbr
- (&priv->dbrpgs,
- rxq_ctrl->cq_dbr_umem_id,
- rxq_ctrl->cq_dbr_offset));
- }
- mlx5_free(tmpl);
- rte_errno = ret; /* Restore rte_errno. */
- }
- if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
- rxq_release_devx_rq_resources(rxq_ctrl);
- rxq_release_devx_cq_resources(rxq_ctrl);
- }
- return NULL;
-}
-
/**
* Verify the Rx queue objects list is empty
*
if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
return 1;
if (rxq_ctrl->obj) {
- mlx5_rxq_obj_release(rxq_ctrl->obj);
+ priv->obj_ops->rxq_obj_release(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
int32_t flow_meta_offset;
} __rte_cache_aligned;
-enum mlx5_rxq_obj_type {
- MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
- MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
- MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN,
- /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */
-};
-
enum mlx5_rxq_type {
MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
MLX5_RXQ_TYPE_UNDEFINED,
};
-/* Verbs/DevX Rx queue elements. */
-struct mlx5_rxq_obj {
- LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
- struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
- enum mlx5_rxq_obj_type type;
- int fd; /* File descriptor for event channel */
- RTE_STD_C11
- union {
- struct {
- void *wq; /* Work Queue. */
- void *ibv_cq; /* Completion Queue. */
- void *ibv_channel;
- };
- struct {
- struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
- struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
- void *devx_channel;
- };
- };
-};
-
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
- enum mlx5_rxq_obj_type type);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+void rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
- enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
- struct mlx5_rxq_data *rxq = NULL;
-
- for (i = 0; i < priv->rxqs_n; ++i) {
- rxq = (*priv->rxqs)[i];
- if (rxq && rxq->lro) {
- obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
- break;
- }
- }
+
/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
if (mlx5_mprq_alloc_mp(dev)) {
/* Should not release Rx queues but return immediately. */
if (!rxq_ctrl)
continue;
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
- rxq_ctrl->obj = mlx5_rxq_obj_new
- (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
- if (!rxq_ctrl->obj)
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ /* Pre-register Rx mempool. */
+ mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+ DRV_LOG(DEBUG, "port %u Rx queue %u registering mp %s"
+ " having %u chunks", dev->data->port_id,
+ rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+ ret = rxq_alloc_elts(rxq_ctrl);
+ if (ret)
goto error;
- continue;
}
- /* Pre-register Rx mempool. */
- mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
- DRV_LOG(DEBUG,
- "port %u Rx queue %u registering"
- " mp %s having %u chunks",
- dev->data->port_id, rxq_ctrl->rxq.idx,
- mp->name, mp->nb_mem_chunks);
- mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
- ret = rxq_alloc_elts(rxq_ctrl);
- if (ret)
- goto error;
- rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
+ rxq_ctrl->obj = priv->obj_ops->rxq_obj_new(dev, i);
if (!rxq_ctrl->obj)
goto error;
- if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
- rxq_ctrl->wqn =
- ((struct ibv_wq *)(rxq_ctrl->obj->wq))->wq_num;
- else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
- rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
}
return 0;
error: