#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>
+#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_common.h>
.qp_state = IBV_QPS_RESET,
.port_num = dev_port,
};
- int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
int ret;
if (type != MLX5_TXQ_MOD_RST2RDY) {
if (type == MLX5_TXQ_MOD_RDY2RST)
return 0;
}
- if (type == MLX5_TXQ_MOD_ERR2RDY)
- attr_mask = IBV_QP_STATE;
mod.qp_state = IBV_QPS_INIT;
- ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
+ ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
if (ret) {
DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
strerror(errno));
wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
- .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
- .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .single_stride_log_num_of_bytes = rxq_data->log_strd_sz,
+ .single_wqe_log_num_of_strides = rxq_data->log_strd_num,
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
rxq_data->cq_db = cq_info.dbrec;
rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
- rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->uar_data.db = RTE_PTR_ADD(cq_info.cq_uar, MLX5_CQ_DOORBELL);
+#ifndef RTE_ARCH_64
+ rxq_data->uar_data.sl_p = &priv->sh->uar_lock_cq;
+#endif
rxq_data->cqn = cq_info.cqn;
/* Create WQ (RQ) using Verbs API. */
tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
/* CQ to be associated with the receive queue. */
qp_attr.recv_cq = txq_ctrl->obj->cq;
/* Max number of outstanding WRs. */
- qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
- priv->sh->device_attr.max_qp_wr : desc);
+ qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);
/*
 * Max number of scatter/gather elements in a WR, must be 1 to prevent
- * libmlx5 from trying to affect must be 1 to prevent libmlx5 from
- * trying to affect too much memory. TX gather is not impacted by the
- * device_attr.max_sge limit and will still work properly.
+ * libmlx5 from trying to affect too much memory. TX gather is not
+ * impacted by the dev_cap.max_sge limit and will still work properly.
*/
qp_attr.cap.max_send_sge = 1;
qp_attr.qp_type = IBV_QPT_RAW_PACKET,
return qp_obj;
}
+/**
+ * Initialize Tx UAR registers for primary process.
+ *
+ * Stores the BlueFlame doorbell register in the per-process UAR table,
+ * configures the doorbell write heuristic and mapping type, and on
+ * 32-bit hosts assigns the UAR spinlock matching the UAR page.
+ *
+ * @param txq_ctrl
+ *   Pointer to Tx queue control structure.
+ * @param bf_reg
+ *   BlueFlame register from Verbs UAR.
+ */
+static void
+mlx5_txq_ibv_uar_init(struct mlx5_txq_ctrl *txq_ctrl, void *bf_reg)
+{
+	struct mlx5_priv *priv = txq_ctrl->priv;
+	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
+	const size_t page_size = rte_mem_page_size();
+	struct mlx5_txq_data *txq = &txq_ctrl->txq;
+	off_t uar_mmap_offset = txq_ctrl->uar_mmap_offset;
+#ifndef RTE_ARCH_64
+	unsigned int lock_idx;
+#endif
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(ppriv);
+	if (page_size == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		rte_errno = ENOMEM;
+		/*
+		 * Do not fall through: deriving the doorbell mapping type
+		 * and the UAR lock index from a (size_t)-1 page size would
+		 * produce garbage values.
+		 */
+		return;
+	}
+	txq->db_heu = priv->sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
+	txq->db_nc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
+	ppriv->uar_table[txq->idx].db = bf_reg;
+#ifndef RTE_ARCH_64
+	/* Assign a UAR lock according to UAR page number. */
+	lock_idx = (uar_mmap_offset / page_size) & MLX5_UAR_PAGE_NUM_MASK;
+	ppriv->uar_table[txq->idx].sl_p = &priv->sh->uar_lock[lock_idx];
+#endif
+}
+
/**
* Create the Tx queue Verbs object.
*
qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* If using DevX, need additional mask to read tisn value. */
- if (priv->sh->devx && !priv->sh->tdn)
+ if (priv->sh->cdev->config.devx && !priv->sh->tdn)
qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
obj.cq.in = txq_obj->cq;
txq_data->wqe_pi = 0;
txq_data->wqe_comp = 0;
txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
+ txq_data->wait_on_time = !!(!priv->sh->config.tx_pp &&
+ priv->sh->cdev->config.hca_attr.wait_on_time &&
+ txq_data->offloads &
+ RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
* If using DevX need to query and store TIS transport domain value.
* This is done once per port.
* Will use this value on Rx, when creating matching TIR.
*/
- if (priv->sh->devx && !priv->sh->tdn) {
+ if (priv->sh->cdev->config.devx && !priv->sh->tdn) {
ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
&priv->sh->tdn);
if (ret) {
rte_errno = EINVAL;
goto error;
}
- txq_uar_init(txq_ctrl, qp.bf.reg);
+ mlx5_txq_ibv_uar_init(txq_ctrl, qp.bf.reg);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
error: