#include <string.h>
#include <stdint.h>
#include <unistd.h>
-#include <sys/mman.h>
#include <inttypes.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#include <infiniband/mlx5dv.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
+#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
return offloads;
}
+/**
+ * Fetch and drop all SW-owned and error CQEs to synchronize the CQ.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ */
+static void
+txq_sync_cq(struct mlx5_txq_data *txq)
+{
+ volatile struct mlx5_cqe *cqe;
+ int ret, i;
+
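+	/* Walk the CQ ring at most once, consuming SW-owned and error CQEs. */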
+ i = txq->cqe_s;
+ do {
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+ if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+ if (likely(ret != MLX5_CQE_STATUS_ERR)) {
+ /* No new CQEs in completion queue. */
+ MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
+ break;
+ }
+ }
+ ++txq->cq_ci;
+ } while (--i);
+ /* Move all CQEs to HW ownership. */
+ for (i = 0; i < txq->cqe_s; i++) {
+ cqe = &txq->cqes[i];
+ cqe->op_own = MLX5_CQE_INVALIDATE;
+ }
+ /* Resync CQE and WQE (WQ in reset state). */
+ rte_cio_wmb();
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ rte_cio_wmb();
+}
+
+/**
+ * Tx queue stop. Device queue goes to the idle state,
+ * and all mbufs held in elts/WQ are freed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /* Move QP to RESET state. */
+ if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
+ struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
+
+ /* Change queue state to reset with DevX. */
+ msq_attr.sq_state = MLX5_SQC_STATE_RDY;
+ msq_attr.state = MLX5_SQC_STATE_RST;
+ ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
+ &msq_attr);
+ if (ret) {
+			DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
+				"%s", strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ } else {
+ struct ibv_qp_attr mod = {
+ .qp_state = IBV_QPS_RESET,
+ .port_num = (uint8_t)priv->dev_port,
+ };
+ struct ibv_qp *qp = txq_ctrl->obj->qp;
+
+ /* Change queue state to reset with Verbs. */
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
+ "%s", strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ }
+ /* Handle all send completions. */
+ txq_sync_cq(txq);
+ /* Free elts stored in the SQ. */
+ txq_free_elts(txq_ctrl);
+	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
+ txq->wqe_ci = txq->wqe_s;
+ txq->wqe_pi = 0;
+ txq->elts_comp = 0;
+ /* Set the actual queue state. */
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+/**
+ * Tx queue stop. Device queue goes to the idle state,
+ * and all mbufs held in elts/WQ are freed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
+{
+ int ret;
+
+ if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
+ DRV_LOG(ERR, "Hairpin queue can't be stopped");
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
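+	/* The queue is already stopped, nothing to do. */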
+ if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
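+	/* In a secondary process, forward the request to the primary. */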
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ ret = mlx5_mp_os_req_queue_control(dev, idx,
+ MLX5_MP_REQ_QUEUE_TX_STOP);
+ } else {
+ ret = mlx5_tx_queue_stop_primary(dev, idx);
+ }
+ return ret;
+}
+
+/**
+ * Tx queue start. Device queue goes to the ready state,
+ * and all internal WQ indexes are reset.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
+ struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
+ struct mlx5_txq_obj *obj = txq_ctrl->obj;
+
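+		/* Move the SQ from the READY to the RESET state. */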
+ msq_attr.sq_state = MLX5_SQC_STATE_RDY;
+ msq_attr.state = MLX5_SQC_STATE_RST;
+ ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
+ if (ret) {
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "Cannot change the Tx QP state to RESET "
+ "%s", strerror(errno));
+ return ret;
+ }
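+		/* Then move the SQ from the RESET to the READY state. */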
+ msq_attr.sq_state = MLX5_SQC_STATE_RST;
+ msq_attr.state = MLX5_SQC_STATE_RDY;
+ ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
+ if (ret) {
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "Cannot change the Tx QP state to READY "
+ "%s", strerror(errno));
+ return ret;
+ }
+ } else {
+ struct ibv_qp_attr mod = {
+ .qp_state = IBV_QPS_RESET,
+ .port_num = (uint8_t)priv->dev_port,
+ };
+ struct ibv_qp *qp = txq_ctrl->obj->qp;
+
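+		/* Walk the QP through the RESET, INIT, RTR and RTS states. */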
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
+ "%s", strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_INIT;
+ ret = mlx5_glue->modify_qp(qp, &mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_RTR;
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ mod.qp_state = IBV_QPS_RTS;
+ ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
+ strerror(errno));
+ rte_errno = errno;
+ return ret;
+ }
+ }
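+	/* The SQ was drained on stop; restart counting WQEs from zero. */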
+ txq_ctrl->txq.wqe_ci = 0;
+ txq_ctrl->txq.wqe_pi = 0;
+ txq_ctrl->txq.elts_comp = 0;
+ /* Set the actual queue state. */
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+}
+
+/**
+ * Tx queue start. Device queue goes to the ready state,
+ * and all internal WQ indexes are reset.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
+{
+ int ret;
+
+ if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
+ DRV_LOG(ERR, "Hairpin queue can't be started");
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
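+	/* The queue is already started, nothing to do. */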
+ if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ ret = mlx5_mp_os_req_queue_control(dev, idx,
+ MLX5_MP_REQ_QUEUE_TX_START);
+ } else {
+ ret = mlx5_tx_queue_start_primary(dev, idx);
+ }
+ return ret;
+}
+
/**
* Tx queue presetup checks.
*
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
return 0;
}
{
struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
#ifndef RTE_ARCH_64
unsigned int lock_idx;
#endif
+	const size_t page_size = rte_mem_page_size();
+
+	if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
void *addr;
uintptr_t uar_va;
uintptr_t offset;
- const size_t page_size = sysconf(_SC_PAGESIZE);
+	const size_t page_size = rte_mem_page_size();
+
+	if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return 0;
*/
uar_va = (uintptr_t)txq_ctrl->bf_reg;
offset = uar_va & (page_size - 1); /* Offset in page. */
- addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
- txq_ctrl->uar_mmap_offset);
- if (addr == MAP_FAILED) {
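+	/* rte_mem_map() returns NULL on failure, unlike mmap(). */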
+ addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
+ fd, txq_ctrl->uar_mmap_offset);
+ if (!addr) {
DRV_LOG(ERR,
"port %u mmap failed for BF reg of txq %u",
txq->port_id, txq->idx);
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
void *addr;
+	const size_t page_size = rte_mem_page_size();
+
+	if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
addr = ppriv->uar_table[txq_ctrl->txq.idx];
- munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+ rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
/**
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txq_obj *txq_obj = NULL;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
struct mlx5_cqe *cqe;
uint32_t i, nqe;
+ void *reg_addr;
+ size_t alignment = (size_t)-1;
int ret = 0;
MLX5_ASSERT(txq_data);
MLX5_ASSERT(!txq_ctrl->obj);
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
sizeof(struct mlx5_txq_obj), 0,
txq_ctrl->socket);
goto error;
}
/* Allocate memory buffer for CQEs. */
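+	/* MLX5_CQE_BUF_ALIGNMENT may resolve to the page size, hence can fail. */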
+ alignment = MLX5_CQE_BUF_ALIGNMENT;
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ goto error;
+ }
txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
nqe * sizeof(struct mlx5_cqe),
- MLX5_CQE_BUF_ALIGNMENT,
+ alignment,
sh->numa_node);
if (!txq_obj->cq_buf) {
DRV_LOG(ERR,
/* Create completion queue object with DevX. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
- cq_attr.q_umem_id = txq_obj->cq_umem->umem_id;
+ cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
sq_attr.allow_swp = !!priv->config.swp;
sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
sq_attr.wq_attr.dbr_umem_id =
mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
sq_attr.wq_attr.wq_umem_valid = 1;
- sq_attr.wq_attr.wq_umem_id = txq_obj->sq_umem->umem_id;
+ sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
txq_obj->sq_devx = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
if (!txq_obj->sq_devx) {
priv->sh->tdn = priv->sh->td->id;
#endif
MLX5_ASSERT(sh->tx_uar);
- MLX5_ASSERT(sh->tx_uar->reg_addr);
- txq_ctrl->bf_reg = sh->tx_uar->reg_addr;
- txq_ctrl->uar_mmap_offset = sh->tx_uar->mmap_off;
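+	/* Query the UAR register address and mmap offset via the OS layer. */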
+ reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+ MLX5_ASSERT(reg_addr);
+ txq_ctrl->bf_reg = reg_addr;
+ txq_ctrl->uar_mmap_offset =
+ mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
rte_atomic32_set(&txq_obj->refcnt, 1);
txq_uar_init(txq_ctrl);
LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
txq_data->cqe_s = 1 << txq_data->cqe_n;
txq_data->cqe_m = txq_data->cqe_s - 1;
- txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
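+	/* The QP handle is kept as an opaque pointer, hence the cast. */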
+ txq_data->qp_num_8s = ((struct ibv_qp *)tmpl.qp)->qp_num << 8;
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
txq_data->wqe_s = 1 << txq_data->wqe_n;
LIST_REMOVE(txq, next);
mlx5_free(txq);
(*priv->txqs)[idx] = NULL;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
return 1;