-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
/**
* Allocate TX queue elements.
*
* @param txq_ctrl
* Pointer to TX queue structure.
- * @param elts_n
- * Number of elements to allocate.
*/
-static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+void
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
+ const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
unsigned int i;
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
- volatile struct mlx5_wqe64 *wqe =
- (volatile struct mlx5_wqe64 *)
- txq_ctrl->txq.wqes + i;
-
- memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
- }
DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
* Pointer to TX queue structure.
*/
static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
const uint16_t elts_m = elts_n - 1;
}
/**
- * Clean up a TX queue.
+ * Returns the per-port supported offloads.
*
- * Destroy objects, free allocated memory and reset the structure for reuse.
+ * @param priv
+ * Pointer to private structure.
*
- * @param txq_ctrl
- * Pointer to TX queue structure.
+ * @return
+ * Supported Tx offloads.
*/
-void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
{
- size_t i;
-
- DEBUG("cleaning up %p", (void *)txq_ctrl);
- txq_free_elts(txq_ctrl);
- if (txq_ctrl->qp != NULL)
- claim_zero(ibv_destroy_qp(txq_ctrl->qp));
- if (txq_ctrl->cq != NULL)
- claim_zero(ibv_destroy_cq(txq_ctrl->cq));
- for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (txq_ctrl->txq.mp2mr[i].mr == NULL)
- break;
- claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ struct mlx5_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->tunnel_en) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
}
- memset(txq_ctrl, 0, sizeof(*txq_ctrl));
+ return offloads;
}
/**
- * Initialize TX queue.
+ * Checks if the per-queue offload configuration is valid.
*
- * @param tmpl
- * Pointer to TX queue control template.
- * @param txq_ctrl
- * Pointer to TX queue control.
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 if the configuration is valid, 0 otherwise.
*/
-static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
{
- struct mlx5_qp *qp = to_mqp(tmpl->qp);
- struct ibv_cq *ibcq = tmpl->cq;
- struct ibv_mlx5_cq_info cq_info;
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
- if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
- ERROR("Unable to query CQ info. check your OFED.");
- return ENOTSUP;
- }
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
- return EINVAL;
- }
- tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
- tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
- tmpl->txq.wqes = qp->gen_data.sqstart;
- tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
- tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
- tmpl->txq.bf_reg = qp->gen_data.bf->reg;
- tmpl->txq.cq_db = cq_info.dbrec;
- tmpl->txq.cqes =
- (volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq_info.buf;
- tmpl->txq.elts =
- (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
- ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
- return 0;
+ /* There are no Tx offloads which are per queue. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
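+ /*
+ * Since no Tx offload is purely per-queue, the per-queue offloads must
+ * also match the port-wide configuration for every supported offload.
+ */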
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
}
/**
- * Configure a TX queue.
+ * DPDK callback to configure a TX queue.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param txq_ctrl
- * Pointer to TX queue structure.
+ * @param idx
+ * TX queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* Thresholds parameters.
*
* @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret = 0;
+
+ priv_lock(priv);
+ /*
+ * Don't verify port offloads for applications which
+ * use the old API.
+ */
+ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx5_priv_get_tx_port_offloads(priv));
+ goto out;
+ }
+ if (desc <= MLX5_TX_COMP_THRESH) {
+ WARN("%p: number of descriptors requested for TX queue %u"
+ " must be higher than MLX5_TX_COMP_THRESH, using"
+ " %u instead of %u",
+ (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ desc = MLX5_TX_COMP_THRESH + 1;
+ }
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ WARN("%p: increased number of descriptors in TX queue %u"
+ " to the next power of two (%d)",
+ (void *)dev, idx, desc);
+ }
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= priv->txqs_n) {
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, priv->txqs_n);
+ priv_unlock(priv);
+ return -EOVERFLOW;
+ }
+ if (!mlx5_priv_txq_releasable(priv, idx)) {
+ ret = EBUSY;
+ ERROR("%p: unable to release queue index %u",
+ (void *)dev, idx);
+ goto out;
+ }
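+ /* Drop the reference on any existing queue at this index before creating a new one. */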
+ mlx5_priv_txq_release(priv, idx);
+ txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ if (!txq_ctrl) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ ret = ENOMEM;
+ goto out;
+ }
+ DEBUG("%p: adding TX queue %p to list",
+ (void *)dev, (void *)txq_ctrl);
+ (*priv->txqs)[idx] = &txq_ctrl->txq;
+out:
+ priv_unlock(priv);
+ return -ret;
+}
+
+/**
+ * DPDK callback to release a TX queue.
+ *
+ * @param dpdk_txq
+ * Generic TX queue pointer.
+ */
+void
+mlx5_tx_queue_release(void *dpdk_txq)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ struct priv *priv;
+ unsigned int i;
+
+ if (txq == NULL)
+ return;
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ priv = txq_ctrl->priv;
+ priv_lock(priv);
+ for (i = 0; (i != priv->txqs_n); ++i)
+ if ((*priv->txqs)[i] == txq) {
+ DEBUG("%p: removing TX queue %p from list",
+ (void *)priv->dev, (void *)txq_ctrl);
+ mlx5_priv_txq_release(priv, i);
+ break;
+ }
+ priv_unlock(priv);
+}
+
+/**
+ * Mmap TX UAR (HW doorbell) pages into the reserved UAR address space.
+ * Both the primary and secondary processes perform the mmap so that
+ * the UAR addresses stay page-aligned and identical across processes.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param fd
+ * Verbs file descriptor to map UAR pages.
+ *
+ * @return
* 0 on success, errno value on failure.
*/
int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_txconf *conf)
+priv_tx_uar_remap(struct priv *priv, int fd)
{
- struct priv *priv = mlx5_get_priv(dev);
- struct txq_ctrl tmpl = {
- .priv = priv,
- .socket = socket,
- };
+ unsigned int i, j;
+ uintptr_t pages[priv->txqs_n];
+ unsigned int pages_n = 0;
+ uintptr_t uar_va;
+ uintptr_t off;
+ void *addr;
+ void *ret;
+ struct mlx5_txq_data *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ int already_mapped;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+
+ memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
+ /*
+ * As in rdma-core, UARs are mapped at OS page-size granularity.
+ * Use the page-aligned address to avoid duplicate mmaps.
+ * See the libmlx5 function mlx5_init_context().
+ */
+ for (i = 0; i != priv->txqs_n; ++i) {
+ if (!(*priv->txqs)[i])
+ continue;
+ txq = (*priv->txqs)[i];
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ /* UAR address from Verbs, used to find duplicates and the in-page offset. */
+ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
+ off = uar_va & (page_size - 1); /* offset in page. */
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
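+ /*
+ * Illustration (hypothetical values, assuming 4 KiB pages): a doorbell
+ * register at 0x7f0042801840 gives off = 0x840 and uar_va = 0x7f0042801000.
+ */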
+ already_mapped = 0;
+ for (j = 0; j != pages_n; ++j) {
+ if (pages[j] == uar_va) {
+ already_mapped = 1;
+ break;
+ }
+ }
+ /* new address in reserved UAR address space. */
+ addr = RTE_PTR_ADD(priv->uar_base,
+ uar_va & (MLX5_UAR_SIZE - 1));
+ if (!already_mapped) {
+ pages[pages_n++] = uar_va;
+ /* Fixed mmap to the specified address in the reserved
+ * address space.
+ */
+ ret = mmap(addr, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (ret != addr) {
+ /* A fixed mmap must return the same address. */
+ ERROR("call to mmap failed on UAR for txq %d\n",
+ i);
+ return ENXIO;
+ }
+ }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
+ txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
+ else
+ assert(txq_ctrl->txq.bf_reg ==
+ RTE_PTR_ADD((void *)addr, off));
+ }
+ return 0;
+}
+
+/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ * Tx burst function pointer.
+ *
+ * @return
+ * 1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+ if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+ tx_pkt_burst == mlx5_tx_burst_vec ||
+ tx_pkt_burst == mlx5_tx_burst_empw)
+ return 1;
+ return 0;
+}
+
+/**
+ * Create the Tx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * Queue index in DPDK Tx queue array.
+ *
+ * @return
+ * The Verbs object initialized if it can be created, NULL otherwise.
+ */
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ struct mlx5_txq_ibv tmpl;
+ struct mlx5_txq_ibv *txq_ibv;
union {
- struct ibv_exp_qp_init_attr init;
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_qp_attr mod;
- struct ibv_exp_cq_attr cq_attr;
+ struct ibv_qp_init_attr_ex init;
+ struct ibv_cq_init_attr_ex cq;
+ struct ibv_qp_attr mod;
+ struct ibv_cq_ex cq_attr;
} attr;
unsigned int cqe_n;
- const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
- (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE);
+ struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_obj obj;
+ const int desc = 1 << txq_data->elts_n;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int ret = 0;
+ assert(txq_data);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
+ priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ret = ENOTSUP;
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
goto error;
}
- tmpl.txq.flags = conf->txq_flags;
- assert(desc > MLX5_TX_COMP_THRESH);
- tmpl.txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+ memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
/* MRs will be registered in mp2mr[] later. */
- attr.cq = (struct ibv_exp_cq_init_attr){
+ attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
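+ /* Roughly one CQE per MLX5_TX_COMP_THRESH descriptors, at least one. */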
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_exp_create_cq(priv->ctx,
- cqe_n,
- NULL, NULL, 0, &attr.cq);
+ tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ret = ENOMEM;
- ERROR("%p: CQ creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: CQ creation failure", (void *)txq_ctrl);
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- attr.init = (struct ibv_exp_qp_init_attr){
+ attr.init = (struct ibv_qp_init_attr_ex){
/* CQ to be associated with the send queue. */
.send_cq = tmpl.cq,
/* CQ to be associated with the receive queue. */
.recv_cq = tmpl.cq,
.cap = {
/* Max number of outstanding WRs. */
- .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
+ .max_send_wr =
+ ((priv->device_attr.orig_attr.max_qp_wr <
+ desc) ?
+ priv->device_attr.orig_attr.max_qp_wr :
+ desc),
/*
* Max number of scatter/gather elements in a WR,
* must be 1 to prevent libmlx5 from trying to affect
.max_send_sge = 1,
},
.qp_type = IBV_QPT_RAW_PACKET,
- /* Do *NOT* enable this, completions events are managed per
- * TX burst. */
+ /*
+ * Do *NOT* enable this, completions events are managed per
+ * Tx burst.
+ */
.sq_sig_all = 0,
.pd = priv->pd,
- .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+ .comp_mask = IBV_QP_INIT_ATTR_PD,
};
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
- tmpl.txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE);
- tmpl.txq.inline_en = 1;
- /* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
- tmpl.txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
- /* To minimize the size of data set, avoid requesting
- * too large WQ.
- */
- attr.init.cap.max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
- (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
- int inline_diff = tmpl.txq.max_inline - max_tso_inline;
-
- /*
- * Adjust inline value as Verbs aggregates
- * tso_inline and txq_inline fields.
- */
- attr.init.cap.max_inline_data = inline_diff > 0 ?
- inline_diff *
- RTE_CACHE_LINE_SIZE :
- 0;
- } else {
- attr.init.cap.max_inline_data =
- tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
- }
+ if (txq_data->max_inline)
+ attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
+ if (txq_data->tso_en) {
+ attr.init.max_tso_header = txq_ctrl->max_tso_header;
+ attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- if (priv->tso) {
- attr.init.max_tso_header =
- max_tso_inline * RTE_CACHE_LINE_SIZE;
- attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
- tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
- max_tso_inline);
- tmpl.txq.tso_en = 1;
- }
- if (priv->tunnel_en)
- tmpl.txq.tunnel_en = 1;
- tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP creation failure", (void *)txq_ctrl);
goto error;
}
- DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
- " max_inline_data=%u",
- attr.init.cap.max_send_wr,
- attr.init.cap.max_send_sge,
- attr.init.cap.max_inline_data);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
- (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
goto error;
}
- ret = txq_setup(&tmpl, txq_ctrl);
- if (ret) {
- ERROR("%p: cannot initialize TX queue structure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- txq_alloc_elts(&tmpl, desc);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
goto error;
}
- /* Clean up txq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
- txq_cleanup(txq_ctrl);
- *txq_ctrl = tmpl;
- DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
- /* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
- assert(ret == 0);
- return 0;
+ txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
+ txq_ctrl->socket);
+ if (!txq_ibv) {
+ ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ goto error;
+ }
+ obj.cq.in = tmpl.cq;
+ obj.cq.out = &cq_info;
+ obj.qp.in = tmpl.qp;
+ obj.qp.out = &qp;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ if (ret != 0)
+ goto error;
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ goto error;
+ }
+ txq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
+ txq_data->wqes = qp.sq.buf;
+ txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
+ txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
+ txq_ctrl->bf_reg_orig = qp.bf.reg;
+ txq_data->cq_db = cq_info.dbrec;
+ txq_data->cqes =
+ (volatile struct mlx5_cqe (*)[])
+ (uintptr_t)cq_info.buf;
+ txq_data->cq_ci = 0;
+#ifndef NDEBUG
+ txq_data->cq_pi = 0;
+#endif
+ txq_data->wqe_ci = 0;
+ txq_data->wqe_pi = 0;
+ txq_ibv->qp = tmpl.qp;
+ txq_ibv->cq = tmpl.cq;
+ rte_atomic32_inc(&txq_ibv->refcnt);
+ if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+ txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+ } else {
+ ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ goto error;
+ }
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return txq_ibv;
error:
- txq_cleanup(&tmpl);
- assert(ret > 0);
+ if (tmpl.cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
+ if (tmpl.qp)
+ claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return NULL;
+}
+
+/**
+ * Get a Tx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * Queue index in DPDK Tx queue array.
+ *
+ * @return
+ * The Verbs object if it exists.
+ */
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_txq_ctrl *txq_ctrl;
+
+ if (idx >= priv->txqs_n)
+ return NULL;
+ if (!(*priv->txqs)[idx])
+ return NULL;
+ txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (txq_ctrl->ibv) {
+ rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ctrl->ibv,
+ rte_atomic32_read(&txq_ctrl->ibv->refcnt));
+ }
+ return txq_ctrl->ibv;
+}
+
+/**
+ * Release a Tx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ *
+ * @return
+ * 0 on success, errno on failure.
+ */
+int
+mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
+ struct mlx5_txq_ibv *txq_ibv)
+{
+ assert(txq_ibv);
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
+ LIST_REMOVE(txq_ibv, next);
+ rte_free(txq_ibv);
+ return 0;
+ }
+ return EBUSY;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ */
+int
+mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
+ struct mlx5_txq_ibv *txq_ibv)
+{
+ assert(txq_ibv);
+ return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
+}
+
+/**
+ * Verify that the Verbs Tx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_ibv_verify(struct priv *priv)
+{
+ int ret = 0;
+ struct mlx5_txq_ibv *txq_ibv;
+
+ LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
+ DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
+ (void *)txq_ibv);
+ ++ret;
+ }
return ret;
}
/**
- * DPDK callback to configure a TX queue.
+ * Set Tx queue parameters from device configuration.
*
- * @param dev
- * Pointer to Ethernet device structure.
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ */
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ const unsigned int max_tso_inline =
+ ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
+
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
+ unsigned int ds_cnt;
+
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ if (is_empw_func) {
+ /* To minimize the size of the data set, avoid
+ * requesting a WQ that is too large.
+ */
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else if (tso) {
+ int inline_diff = txq_ctrl->txq.max_inline -
+ max_tso_inline;
+
+ /*
+ * Adjust inline value as Verbs aggregates
+ * tso_inline and txq_inline fields.
+ */
+ txq_ctrl->max_inline_data = inline_diff > 0 ?
+ inline_diff *
+ RTE_CACHE_LINE_SIZE :
+ 0;
+ } else {
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ }
+ /*
+ * Check if the inline size is so large that it could make the
+ * WQE DS count overflow.
+ * The calculation accounts for:
+ * WQE CTRL (1 DS)
+ * WQE ETH (1 DS)
+ * Inline part (N DS)
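+ * For illustration (assuming MLX5_WQE_DWORD_SIZE is 16 bytes and
+ * MLX5_DSEG_MAX is 63), the inline part would be capped at
+ * (63 - 2) * 16 = 976 bytes before rounding down to a cache-line
+ * multiple.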
+ */
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ if (ds_cnt > MLX5_DSEG_MAX) {
+ unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+ MLX5_WQE_DWORD_SIZE;
+
+ max_inline = max_inline - (max_inline %
+ RTE_CACHE_LINE_SIZE);
+ WARN("txq inline is too large (%d) setting it to "
+ "the maximum possible: %d\n",
+ txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
+ }
+ }
+ if (tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
+ }
+ txq_ctrl->txq.tunnel_en = config->tunnel_en;
+}
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
* @param idx
* TX queue index.
* @param desc
* @param socket
* NUMA socket on which memory must be allocated.
* @param[in] conf
- * Thresholds parameters.
+ * Thresholds parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * A DPDK Tx queue object on success, NULL otherwise.
*/
-int
-mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
- struct txq *txq = (*priv->txqs)[idx];
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- int ret;
+ struct mlx5_txq_ctrl *tmpl;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.offloads = conf->offloads;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ txq_set_params(tmpl);
+ /* MRs will be registered in mp2mr[] later. */
+ DEBUG("priv->device_attr.max_qp_wr is %d",
+ priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("priv->device_attr.max_sge is %d",
+ priv->device_attr.orig_attr.max_sge);
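+ /* The elts[] array is co-allocated right after the control structure. */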
+ tmpl->txq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+ tmpl->txq.stats.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
+ return tmpl;
+}
- priv_lock(priv);
- if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("%p: number of descriptors requested for TX queue %u"
- " must be higher than MLX5_TX_COMP_THRESH, using"
- " %u instead of %u",
- (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
- desc = MLX5_TX_COMP_THRESH + 1;
- }
- if (!rte_is_power_of_2(desc)) {
- desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in TX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
+/**
+ * Get a Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists.
+ */
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_txq_ctrl *ctrl = NULL;
+
+ if ((*priv->txqs)[idx]) {
+ ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
+ txq);
+ unsigned int i;
+
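+ /* Take a reference on the Verbs object and on each cached MR as well. */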
+ mlx5_priv_txq_ibv_get(priv, idx);
+ for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+ if (ctrl->txq.mp2mr[i])
+ claim_nonzero
+ (priv_mr_get(priv,
+ ctrl->txq.mp2mr[i]->mp));
+ }
+ rte_atomic32_inc(&ctrl->refcnt);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
+ return ctrl;
+}
+
+/**
+ * Release a Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 0 on success, errno on failure.
+ */
+int
+mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
+{
+ unsigned int i;
+ struct mlx5_txq_ctrl *txq;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+
+ if (!(*priv->txqs)[idx])
+ return 0;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq, rte_atomic32_read(&txq->refcnt));
+ if (txq->ibv) {
+ int ret;
+
+ ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
+ if (!ret)
+ txq->ibv = NULL;
}
- if (txq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)txq);
- if (priv->started) {
- priv_unlock(priv);
- return -EEXIST;
- }
- (*priv->txqs)[idx] = NULL;
- txq_cleanup(txq_ctrl);
- /* Resize if txq size is changed. */
- if (txq_ctrl->txq.elts_n != log2above(desc)) {
- txq_ctrl = rte_realloc(txq_ctrl,
- sizeof(*txq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
- RTE_CACHE_LINE_SIZE);
- if (!txq_ctrl) {
- ERROR("%p: unable to reallocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
- }
- } else {
- txq_ctrl =
- rte_calloc_socket("TXQ", 1,
- sizeof(*txq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
- if (txq_ctrl == NULL) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
+ for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+ if (txq->txq.mp2mr[i]) {
+ priv_mr_release(priv, txq->txq.mp2mr[i]);
+ txq->txq.mp2mr[i] = NULL;
}
}
- ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
- if (ret)
- rte_free(txq_ctrl);
- else {
- txq_ctrl->txq.stats.idx = idx;
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq_ctrl);
- (*priv->txqs)[idx] = &txq_ctrl->txq;
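+ /* Unmap the UAR doorbell page remapped by priv_tx_uar_remap(). */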
+ if (priv->uar_base)
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
+ page_size), page_size);
+ if (rte_atomic32_dec_and_test(&txq->refcnt)) {
+ txq_free_elts(txq);
+ LIST_REMOVE(txq, next);
+ rte_free(txq);
+ (*priv->txqs)[idx] = NULL;
+ return 0;
}
- priv_unlock(priv);
- return -ret;
+ return EBUSY;
}
/**
- * DPDK callback to release a TX queue.
+ * Verify if the queue can be released.
*
- * @param dpdk_txq
- * Generic TX queue pointer.
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 if the queue can be released, 0 otherwise, -1 if the queue does not exist.
*/
-void
-mlx5_tx_queue_release(void *dpdk_txq)
+int
+mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct txq_ctrl *txq_ctrl;
- struct priv *priv;
- unsigned int i;
+ struct mlx5_txq_ctrl *txq;
- if (mlx5_is_secondary())
- return;
+ if (!(*priv->txqs)[idx])
+ return -1;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ return (rte_atomic32_read(&txq->refcnt) == 1);
+}
- if (txq == NULL)
- return;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- priv = txq_ctrl->priv;
- priv_lock(priv);
- for (i = 0; (i != priv->txqs_n); ++i)
- if ((*priv->txqs)[i] == txq) {
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq_ctrl);
- (*priv->txqs)[i] = NULL;
- break;
- }
- txq_cleanup(txq_ctrl);
- rte_free(txq_ctrl);
- priv_unlock(priv);
+/**
+ * Verify that the Tx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_verify(struct priv *priv)
+{
+ struct mlx5_txq_ctrl *txq;
+ int ret = 0;
+
+ LIST_FOREACH(txq, &priv->txqsctrl, next) {
+ DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
+ (void *)txq);
+ ++ret;
+ }
+ return ret;
}