#include <stddef.h>
#include <errno.h>
-#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include <mlx5_glue.h>
}
/* verbs operations. */
-const struct mlx5_verbs_ops mlx5_verbs_ops = {
+const struct mlx5_mr_ops mlx5_mr_verbs_ops = { /* MR reg/dereg callbacks backed by Verbs. */
	.reg_mr = mlx5_reg_mr,
	.dereg_mr = mlx5_dereg_mr,
};
*
* @param rxq_obj
* Verbs Rx queue object.
+ * @param type
+ *   Requested WQ state to switch the queue to (an IBV_WQS_* value).
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
+mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
-		.wq_state = is_start ? IBV_WQS_RDY : IBV_WQS_RESET,
+		.wq_state = (enum ibv_wq_state)type, /* Caller passes an IBV_WQS_* value directly. */
	};
	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
+/**
+ * Modify QP using Verbs API.
+ *
+ * @param obj
+ *   Verbs Tx queue object.
+ * @param type
+ *   Requested state transition (an MLX5_TXQ_MOD_* value).
+ * @param dev_port
+ *   IB device port number.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   NOTE(review): the failure paths return the value from
+ *   mlx5_glue->modify_qp as-is — confirm callers accept it as an errno.
+ */
+static int
+mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+		   uint8_t dev_port)
+{
+	struct ibv_qp_attr mod = {
+		.qp_state = IBV_QPS_RESET,
+		.port_num = dev_port,
+	};
+	int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
+	int ret;
+
+	/* Unless the transition starts from RESET, move the QP there first. */
+	if (type != MLX5_TXQ_MOD_RST2RDY) {
+		ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+		if (ret) {
+			DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
+				strerror(errno));
+			rte_errno = errno;
+			return ret;
+		}
+		/* When stopping the queue, RESET is the final state. */
+		if (type == MLX5_TXQ_MOD_RDY2RST)
+			return 0;
+	}
+	/* On error recovery only the state is modified, the port is kept. */
+	if (type == MLX5_TXQ_MOD_ERR2RDY)
+		attr_mask = IBV_QP_STATE;
+	/* Walk the QP through the ready sequence: INIT -> RTR -> RTS. */
+	mod.qp_state = IBV_QPS_INIT;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	mod.qp_state = IBV_QPS_RTR;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	mod.qp_state = IBV_QPS_RTS;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	return 0;
+}
+
/**
* Create a CQ Verbs object.
*
if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
cq_attr.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ rxq_data->byte_mask = UINT32_MAX;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- cq_attr.mlx5.cqe_comp_res_format =
- mlx5_rxq_mprq_enabled(rxq_data) ?
- MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
- MLX5DV_CQE_RES_FORMAT_HASH;
+ if (mlx5_rxq_mprq_enabled(rxq_data)) {
+ cq_attr.mlx5.cqe_comp_res_format =
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
+ rxq_data->mcqe_format =
+ MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
+ } else {
+ cq_attr.mlx5.cqe_comp_res_format =
+ MLX5DV_CQE_RES_FORMAT_HASH;
+ rxq_data->mcqe_format =
+ MLX5_CQE_RESP_FORMAT_HASH;
+ }
#else
cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+ rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
#endif
/*
* For vectorized Rx, it must not be doubled in order to
dev->data->port_id);
}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (priv->config.cqe_pad) {
+ if (RTE_CACHE_LINE_SIZE == 128) {
cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
}
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(tmpl);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
- priv->verbs_alloc_ctx.obj = rxq_ctrl;
- tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
tmpl->ibv_channel =
goto error;
}
/* Change queue state to ready. */
- ret = mlx5_ibv_modify_wq(tmpl, true);
+ ret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);
if (ret) {
DRV_LOG(ERR,
"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
rxq_data->cq_arm_sn = 0;
mlx5_rxq_initialize(rxq_data);
rxq_data->cq_ci = 0;
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
return 0;
if (tmpl->ibv_channel)
claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
rte_errno = ret; /* Restore rte_errno. */
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return -rte_errno;
}
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Tx queue array.
- * @param rxq_obj
- * Pointer to Tx queue object data.
*
* @return
- * The QP Verbs object initialized, NULL otherwise and rte_errno is set.
+ * The QP Verbs object, NULL otherwise and rte_errno is set.
*/
static struct ibv_qp *
-mlx5_ibv_qp_new(struct rte_eth_dev *dev, uint16_t idx,
- struct mlx5_txq_obj *txq_obj)
+mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct ibv_qp_init_attr_ex qp_attr = { 0 };
const int desc = 1 << txq_data->elts_n;
- MLX5_ASSERT(!txq_ctrl->obj);
+ MLX5_ASSERT(txq_ctrl->obj->cq);
/* CQ to be associated with the send queue. */
- qp_attr.send_cq = txq_obj->cq;
+ qp_attr.send_cq = txq_ctrl->obj->cq;
/* CQ to be associated with the receive queue. */
- qp_attr.recv_cq = txq_obj->cq;
+ qp_attr.recv_cq = txq_ctrl->obj->cq;
/* Max number of outstanding WRs. */
qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
priv->sh->device_attr.max_qp_wr : desc);
* Queue index in DPDK Tx queue array.
*
* @return
- * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-struct mlx5_txq_obj *
+int
mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
- struct mlx5_txq_obj tmpl;
- struct mlx5_txq_obj *txq_obj = NULL;
- struct ibv_qp_attr mod;
+ struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
unsigned int cqe_n;
struct mlx5dv_qp qp;
struct mlx5dv_cq cq_info;
int ret = 0;
MLX5_ASSERT(txq_data);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
- priv->verbs_alloc_ctx.obj = txq_ctrl;
+ MLX5_ASSERT(txq_obj);
+ txq_obj->txq_ctrl = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
"must never be set.", dev->data->port_id);
rte_errno = EINVAL;
- return NULL;
+ return -rte_errno;
}
- memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
cqe_n = desc / MLX5_TX_COMP_THRESH +
1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
- if (tmpl.cq == NULL) {
+ txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+ if (txq_obj->cq == NULL) {
DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
- tmpl.qp = mlx5_ibv_qp_new(dev, idx, &tmpl);
- if (tmpl.qp == NULL) {
- rte_errno = errno;
- goto error;
- }
- mod = (struct ibv_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* IB device port number. */
- .port_num = (uint8_t)priv->dev_port,
- };
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, (IBV_QP_STATE | IBV_QP_PORT));
- if (ret) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u QP state to IBV_QPS_INIT failed.",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- mod = (struct ibv_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u QP state to IBV_QPS_RTR failed.",
- dev->data->port_id, idx);
+ txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
+ if (txq_obj->qp == NULL) {
rte_errno = errno;
goto error;
}
- mod.qp_state = IBV_QPS_RTS;
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
+ ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
+ (uint8_t)priv->dev_port);
if (ret) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u QP state to IBV_QPS_RTS failed.",
+ DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
- txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
- if (!txq_obj) {
- DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate memory.",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* If using DevX, need additional mask to read tisn value. */
if (priv->sh->devx && !priv->sh->tdn)
qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
- obj.cq.in = tmpl.cq;
+ obj.cq.in = txq_obj->cq;
obj.cq.out = &cq_info;
- obj.qp.in = tmpl.qp;
+ obj.qp.in = txq_obj->qp;
obj.qp.out = &qp;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
if (ret != 0) {
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
txq_data->cqe_s = 1 << txq_data->cqe_n;
txq_data->cqe_m = txq_data->cqe_s - 1;
- txq_data->qp_num_8s = ((struct ibv_qp *)tmpl.qp)->qp_num << 8;
+ txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
txq_data->wqe_s = 1 << txq_data->wqe_n;
txq_data->wqe_pi = 0;
txq_data->wqe_comp = 0;
txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- txq_data->cqe_s * sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
- if (!txq_data->fcqs) {
- DRV_LOG(ERR, "Port %u Tx queue %u can't allocate memory (FCQ).",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
* If using DevX need to query and store TIS transport domain value.
* Will use this value on Rx, when creating matching TIR.
*/
if (priv->sh->devx && !priv->sh->tdn) {
- ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
+ ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
&priv->sh->tdn);
if (ret) {
DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
}
}
#endif
- txq_obj->qp = tmpl.qp;
- txq_obj->cq = tmpl.cq;
txq_ctrl->bf_reg = qp.bf.reg;
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
goto error;
}
txq_uar_init(txq_ctrl);
- txq_obj->txq_ctrl = txq_ctrl;
- LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- return txq_obj;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl.cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
- if (tmpl.qp)
- claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
- if (txq_data->fcqs) {
- mlx5_free(txq_data->fcqs);
- txq_data->fcqs = NULL;
- }
- if (txq_obj)
- mlx5_free(txq_obj);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ if (txq_obj->cq)
+ claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
+ if (txq_obj->qp)
+ claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
rte_errno = ret; /* Restore rte_errno. */
- return NULL;
+ return -rte_errno;
}
/**
MLX5_ASSERT(txq_obj);
claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
- if (txq_obj->txq_ctrl->txq.fcqs) {
- mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
- txq_obj->txq_ctrl->txq.fcqs = NULL;
- }
- LIST_REMOVE(txq_obj, next);
- mlx5_free(txq_obj);
}
struct mlx5_obj_ops ibv_obj_ops = {
	.drop_action_create = mlx5_ibv_drop_action_create,
	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
	.txq_obj_new = mlx5_txq_ibv_obj_new,
+	.txq_obj_modify = mlx5_ibv_modify_qp, /* Tx QP state change (start/stop/recover). */
	.txq_obj_release = mlx5_txq_ibv_obj_release,
};