#include <errno.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
* Number of elements to allocate.
*/
static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n)
{
unsigned int i;
* Pointer to TX queue structure.
*/
static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
const uint16_t elts_m = elts_n - 1;
* Pointer to TX queue structure.
*/
void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
{
size_t i;
* 0 on success, errno value on failure.
*/
static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
{
- struct mlx5_qp *qp = to_mqp(tmpl->qp);
+ struct mlx5dv_qp qp;
struct ibv_cq *ibcq = tmpl->cq;
- struct ibv_mlx5_cq_info cq_info;
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_obj obj;
+ int ret = 0;
- if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
- ERROR("Unable to query CQ info. check your OFED.");
- return ENOTSUP;
+ qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
+ obj.cq.in = ibcq;
+ obj.cq.out = &cq_info;
+ obj.qp.in = tmpl->qp;
+ obj.qp.out = &qp;
+ ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ if (ret != 0) {
+ ERROR("Unable to query CQ/QP info, check your rdma-core.");
+ return EINVAL;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
return EINVAL;
}
tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
- tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
- tmpl->txq.wqes = qp->gen_data.sqstart;
- tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
- tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
- tmpl->txq.bf_reg = qp->gen_data.bf->reg;
+ tmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8;
+ tmpl->txq.wqes = qp.sq.buf;
+ tmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt);
+ tmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR];
+ tmpl->txq.bf_reg = qp.bf.reg;
tmpl->txq.cq_db = cq_info.dbrec;
tmpl->txq.cqes =
(volatile struct mlx5_cqe (*)[])
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
+ if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+ tmpl->uar_mmap_offset = qp.uar_mmap_offset;
+ } else {
+ ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ return EINVAL;
+ }
+
return 0;
}
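
For reference, the Direct Verbs query pattern introduced above can be
exercised in isolation. A minimal sketch, assuming an already created CQ
and QP (function and variable names here are illustrative, not part of
the patch); note that the provider confirms optional output fields by
keeping bits set in comp_mask, so the result must be tested with '&':

	#include <errno.h>
	#include <infiniband/verbs.h>
	#include <infiniband/mlx5dv.h>

	static int
	query_dv_objects(struct ibv_cq *cq, struct ibv_qp *qp_in)
	{
		struct mlx5dv_cq dv_cq;
		struct mlx5dv_qp dv_qp;
		struct mlx5dv_obj obj;

		/* Request the optional UAR mmap offset before the call;
		 * no optional CQ attributes are requested. */
		dv_qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
		dv_cq.comp_mask = 0;
		obj.cq.in = cq;
		obj.cq.out = &dv_cq;
		obj.qp.in = qp_in;
		obj.qp.out = &dv_qp;
		if (mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
			return EINVAL;
		/* The optional field is valid only if the bit survived;
		 * a '|' test here would always be true. */
		if (!(dv_qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET))
			return EINVAL;
		return 0;
	}
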
* 0 on success, errno value on failure.
*/
int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_txconf *conf)
+mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf)
{
struct priv *priv = mlx5_get_priv(dev);
- struct txq_ctrl tmpl = {
+ struct mlx5_txq_ctrl tmpl = {
.priv = priv,
.socket = socket,
};
union {
- struct ibv_exp_qp_init_attr init;
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_qp_attr mod;
- struct ibv_exp_cq_attr cq_attr;
+ struct ibv_qp_init_attr_ex init;
+ struct ibv_cq_init_attr_ex cq;
+ struct ibv_qp_attr mod;
+ struct ibv_cq_ex cq_attr;
} attr;
unsigned int cqe_n;
const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
if (priv->mps == MLX5_MPW_ENHANCED)
tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
- attr.cq = (struct ibv_exp_cq_init_attr){
+ attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
if (priv->mps == MLX5_MPW_ENHANCED)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_exp_create_cq(priv->ctx,
- cqe_n,
- NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_create_cq(priv->ctx,
+ cqe_n,
+ NULL, NULL, 0);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
goto error;
}
DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
+ priv->device_attr.orig_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- attr.init = (struct ibv_exp_qp_init_attr){
+ priv->device_attr.orig_attr.max_sge);
+ attr.init = (struct ibv_qp_init_attr_ex){
/* CQ to be associated with the send queue. */
.send_cq = tmpl.cq,
/* CQ to be associated with the receive queue. */
.recv_cq = tmpl.cq,
.cap = {
/* Max number of outstanding WRs. */
- .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
+ .max_send_wr =
+ ((priv->device_attr.orig_attr.max_qp_wr < desc) ?
+ priv->device_attr.orig_attr.max_qp_wr :
+ desc),
/*
* Max number of scatter/gather elements in a WR,
* must be 1 to prevent libmlx5 from trying to affect
* TX burst. */
.sq_sig_all = 0,
.pd = priv->pd,
- .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+ .comp_mask = IBV_QP_INIT_ATTR_PD,
};
if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ unsigned int ds_cnt;
+
tmpl.txq.max_inline =
((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
attr.init.cap.max_inline_data =
tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
}
+ /*
+ * Check if the inline size is too large in a way which
+ * can make the WQE DS to overflow.
+ * Considering in calculation:
+ * WQE CTRL (1 DS)
+ * WQE ETH (1 DS)
+ * Inline part (N DS)
+ */
+ ds_cnt = 2 +
+ (attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE);
+ if (ds_cnt > MLX5_DSEG_MAX) {
+ unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+ MLX5_WQE_DWORD_SIZE;
+
+ max_inline = max_inline - (max_inline %
+ RTE_CACHE_LINE_SIZE);
+ WARN("txq inline is too large (%d) setting it to "
+ "the maximum possible: %d\n",
+ priv->txq_inline, max_inline);
+ tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+ attr.init.cap.max_inline_data = max_inline;
+ }
}
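
The DS overflow clamp just above is easiest to follow with concrete
numbers. A worked sketch, assuming MLX5_DSEG_MAX == 63 (the 6-bit DS
field limit), MLX5_WQE_DWORD_SIZE == 16 and a 64-byte cache line; these
constants are assumptions to be checked against the PMD headers of the
actual build:

	#include <stdio.h>

	int
	main(void)
	{
		const unsigned int dseg_max = 63;  /* assumed MLX5_DSEG_MAX */
		const unsigned int dword = 16;     /* assumed MLX5_WQE_DWORD_SIZE */
		const unsigned int cacheline = 64; /* assumed RTE_CACHE_LINE_SIZE */
		unsigned int inline_req = 1024;    /* hypothetical txq_inline */
		/* WQE CTRL (1 DS) + WQE ETH (1 DS) + inline part (N DS). */
		unsigned int ds_cnt = 2 + inline_req / dword; /* 66 > 63 */

		if (ds_cnt > dseg_max) {
			unsigned int max_inline = (dseg_max - 2) * dword; /* 976 */

			max_inline -= max_inline % cacheline; /* 960 */
			printf("clamping inline from %u to %u\n",
			       inline_req, max_inline);
		}
		return 0;
	}
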
if (priv->tso) {
attr.init.max_tso_header =
max_tso_inline * RTE_CACHE_LINE_SIZE;
- attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+ attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
max_tso_inline);
tmpl.txq.tso_en = 1;
}
if (priv->tunnel_en)
tmpl.txq.tunnel_en = 1;
- tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+ tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
ERROR("%p: QP creation failure: %s",
attr.init.cap.max_send_wr,
attr.init.cap.max_send_sge,
attr.init.cap.max_inline_data);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
- (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
(void *)dev, strerror(ret));
goto error;
}
txq_alloc_elts(&tmpl, desc);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
(void *)dev, strerror(ret));
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
(void *)dev, strerror(ret));
}
/* Clean up txq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
*txq_ctrl = tmpl;
DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
/* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
+ rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
assert(ret == 0);
return 0;
error:
- txq_cleanup(&tmpl);
+ mlx5_txq_cleanup(&tmpl);
assert(ret > 0);
return ret;
}
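
The QP bring-up above follows the standard Verbs state walk. A condensed
sketch of just that sequence, with placeholder 'qp' and 'port' arguments;
as in the patch, only IBV_QP_STATE and IBV_QP_PORT are passed, which is
sufficient for these QPs (connection-oriented attributes such as path MTU
or destination QPN are not involved here):

	#include <infiniband/verbs.h>

	static int
	qp_to_rts(struct ibv_qp *qp, uint8_t port)
	{
		struct ibv_qp_attr mod;
		int ret;

		/* RESET -> INIT, attaching the QP to a physical port. */
		mod = (struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = port,
		};
		ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
		if (ret)
			return ret;
		/* INIT -> RTR (ready to receive). */
		mod = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR };
		ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret)
			return ret;
		/* RTR -> RTS (ready to send). */
		mod.qp_state = IBV_QPS_RTS;
		return ibv_modify_qp(qp, &mod, IBV_QP_STATE);
	}
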
unsigned int socket, const struct rte_eth_txconf *conf)
{
struct priv *priv = dev->data->dev_private;
- struct txq *txq = (*priv->txqs)[idx];
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
int ret;
if (mlx5_is_secondary())
if (txq != NULL) {
DEBUG("%p: reusing already allocated queue index %u (%p)",
(void *)dev, idx, (void *)txq);
- if (priv->started) {
+ if (dev->data->dev_started) {
priv_unlock(priv);
return -EEXIST;
}
(*priv->txqs)[idx] = NULL;
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
/* Resize if txq size is changed. */
if (txq_ctrl->txq.elts_n != log2above(desc)) {
txq_ctrl = rte_realloc(txq_ctrl,
return -ENOMEM;
}
}
- ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
+ ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
if (ret)
rte_free(txq_ctrl);
else {
void
mlx5_tx_queue_release(void *dpdk_txq)
{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct txq_ctrl *txq_ctrl;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
struct priv *priv;
unsigned int i;
if (txq == NULL)
return;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
(*priv->txqs)[i] = NULL;
break;
}
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
rte_free(txq_ctrl);
priv_unlock(priv);
}
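
Both the setup and release paths recover the control structure from the
data-path pointer with container_of(). A simplified stand-in showing why
the embedded-struct layout makes this a constant-offset operation (the
PMD uses the container_of() from rte_common.h; the types below are toy
versions, not the driver's):

	#include <stddef.h>

	struct txq_data { int elts_n; };
	struct txq_ctrl {
		int socket;
		struct txq_data txq; /* embedded data-path part */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct txq_ctrl *
	ctrl_from_data(struct txq_data *txq)
	{
		/* Subtracting offsetof(struct txq_ctrl, txq) walks back
		 * from the member to its enclosing structure. */
		return container_of(txq, struct txq_ctrl, txq);
	}
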
+
+/**
+ * Map locally UAR used in Tx queues for BlueFlame doorbell.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param fd
+ * Verbs file descriptor to map UAR pages.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+priv_tx_uar_remap(struct priv *priv, int fd)
+{
+ unsigned int i, j;
+ uintptr_t pages[priv->txqs_n];
+ unsigned int pages_n = 0;
+ uintptr_t uar_va;
+ void *addr;
+ struct mlx5_txq_data *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ int already_mapped;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+
+ /*
+ * As in rdma-core, UARs are mapped at OS page-size granularity.
+ * Use the page-aligned address to avoid duplicate mmap calls.
+ * See the libmlx5 function mlx5_init_context().
+ */
+ for (i = 0; i != priv->txqs_n; ++i) {
+ txq = (*priv->txqs)[i];
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+ already_mapped = 0;
+ for (j = 0; j != pages_n; ++j) {
+ if (pages[j] == uar_va) {
+ already_mapped = 1;
+ break;
+ }
+ }
+ if (already_mapped)
+ continue;
+ pages[pages_n++] = uar_va;
+ addr = mmap((void *)uar_va, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (addr != (void *)uar_va) {
+ ERROR("call to mmap failed on UAR for txq %d\n", i);
+ return errno;
+ }
+ }
+ return 0;
+}
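
priv_tx_uar_remap() lets a secondary process reproduce the primary's
BlueFlame doorbell mappings at identical virtual addresses. The essential
pattern is page alignment to deduplicate mappings plus MAP_FIXED to pin
the address. A standalone sketch of that pattern; 'fd' is the Verbs
device file descriptor while 'offset' and 'bf_reg' stand in for values
obtained from mlx5dv_qp, so this is an illustration rather than the PMD
code:

	#include <stdint.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int
	map_uar_page(int fd, off_t offset, void *bf_reg)
	{
		size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
		uintptr_t uar_va = (uintptr_t)bf_reg;
		/* Round down to the page holding the doorbell register. */
		void *want = (void *)(uar_va & ~((uintptr_t)page_size - 1));

		/* MAP_FIXED either maps exactly at 'want' or fails. */
		if (mmap(want, page_size, PROT_WRITE, MAP_FIXED | MAP_SHARED,
			 fd, offset) == MAP_FAILED)
			return -1;
		return 0;
	}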