X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=f551f87fa68577a667ec82038871e35418b515b2;hb=8bb00fb185a49028b9b63111ec2b98b7de8749cf;hp=2bae61f57fcaf10ab35d89156218e77676693117;hpb=2e22920b85d9a787a696f9b666e233ca7501f042;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 2bae61f57f..f551f87fa6 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -36,139 +36,91 @@ #include #include #include +#include +#include /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ #ifdef PEDANTIC -#pragma GCC diagnostic ignored "-pedantic" +#pragma GCC diagnostic ignored "-Wpedantic" #endif #include #ifdef PEDANTIC -#pragma GCC diagnostic error "-pedantic" +#pragma GCC diagnostic error "-Wpedantic" #endif -/* DPDK headers don't like -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-pedantic" -#endif #include #include #include #include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-pedantic" -#endif #include "mlx5_utils.h" +#include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_rxtx.h" #include "mlx5_autoconf.h" -#include "mlx5_defs.h" /** * Allocate TX queue elements. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. * @param elts_n * Number of elements to allocate. - * - * @return - * 0 on success, errno value on failure. */ -static int -txq_alloc_elts(struct txq *txq, unsigned int elts_n) +static void +txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n) { unsigned int i; - struct txq_elt (*elts)[elts_n] = - rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket); - linear_t (*elts_linear)[elts_n] = - rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0, - txq->socket); - struct ibv_mr *mr_linear = NULL; - int ret = 0; - if ((elts == NULL) || (elts_linear == NULL)) { - ERROR("%p: can't allocate packets array", (void *)txq); - ret = ENOMEM; - goto error; - } - mr_linear = - ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear), - (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); - if (mr_linear == NULL) { - ERROR("%p: unable to configure MR, ibv_reg_mr() failed", - (void *)txq); - ret = EINVAL; - goto error; - } - for (i = 0; (i != elts_n); ++i) { - struct txq_elt *elt = &(*elts)[i]; + for (i = 0; (i != elts_n); ++i) + (*txq_ctrl->txq.elts)[i] = NULL; + for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) { + volatile struct mlx5_wqe64 *wqe = + (volatile struct mlx5_wqe64 *) + txq_ctrl->txq.wqes + i; - elt->buf = NULL; + memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe)); } - DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n); - txq->elts_n = elts_n; - txq->elts = elts; - txq->elts_head = 0; - txq->elts_tail = 0; - txq->elts_comp = 0; - /* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or - * at least 4 times per ring. */ - txq->elts_comp_cd_init = - ((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ? 
- MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4)); - txq->elts_comp_cd = txq->elts_comp_cd_init; - txq->elts_linear = elts_linear; - txq->mr_linear = mr_linear; - assert(ret == 0); - return 0; -error: - if (mr_linear != NULL) - claim_zero(ibv_dereg_mr(mr_linear)); - - rte_free(elts_linear); - rte_free(elts); - - DEBUG("%p: failed, freed everything", (void *)txq); - assert(ret > 0); - return ret; + DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; } /** * Free TX queue elements. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. */ static void -txq_free_elts(struct txq *txq) +txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) { - unsigned int i; - unsigned int elts_n = txq->elts_n; - struct txq_elt (*elts)[elts_n] = txq->elts; - linear_t (*elts_linear)[elts_n] = txq->elts_linear; - struct ibv_mr *mr_linear = txq->mr_linear; + const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n; + const uint16_t elts_m = elts_n - 1; + uint16_t elts_head = txq_ctrl->txq.elts_head; + uint16_t elts_tail = txq_ctrl->txq.elts_tail; + struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts; - DEBUG("%p: freeing WRs", (void *)txq); - txq->elts_n = 0; - txq->elts = NULL; - txq->elts_linear = NULL; - txq->mr_linear = NULL; - if (mr_linear != NULL) - claim_zero(ibv_dereg_mr(mr_linear)); + DEBUG("%p: freeing WRs", (void *)txq_ctrl); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; - rte_free(elts_linear); - if (elts == NULL) - return; - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct txq_elt *elt = &(*elts)[i]; + while (elts_tail != elts_head) { + struct rte_mbuf *elt = (*elts)[elts_tail & elts_m]; - if (elt->buf == NULL) - continue; - rte_pktmbuf_free(elt->buf); + assert(elt != NULL); + rte_pktmbuf_free_seg(elt); +#ifndef NDEBUG + /* Poisoning. */ + memset(&(*elts)[elts_tail & elts_m], + 0x77, + sizeof((*elts)[elts_tail & elts_m])); +#endif + ++elts_tail; } - rte_free(elts); } /** @@ -176,61 +128,83 @@ txq_free_elts(struct txq *txq) * * Destroy objects, free allocated memory and reset the structure for reuse. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. 
 */
 void
-txq_cleanup(struct txq *txq)
+mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
 {
-	struct ibv_exp_release_intf_params params;
 	size_t i;
 
-	DEBUG("cleaning up %p", (void *)txq);
-	txq_free_elts(txq);
-	if (txq->if_qp != NULL) {
-		assert(txq->priv != NULL);
-		assert(txq->priv->ctx != NULL);
-		assert(txq->qp != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq->priv->ctx,
-						txq->if_qp,
-						&params));
-	}
-	if (txq->if_cq != NULL) {
-		assert(txq->priv != NULL);
-		assert(txq->priv->ctx != NULL);
-		assert(txq->cq != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq->priv->ctx,
-						txq->if_cq,
-						&params));
+	DEBUG("cleaning up %p", (void *)txq_ctrl);
+	txq_free_elts(txq_ctrl);
+	if (txq_ctrl->qp != NULL)
+		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
+	if (txq_ctrl->cq != NULL)
+		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
+	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
+		if (txq_ctrl->txq.mp2mr[i].mr == NULL)
+			break;
+		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
 	}
-	if (txq->qp != NULL)
-		claim_zero(ibv_destroy_qp(txq->qp));
-	if (txq->cq != NULL)
-		claim_zero(ibv_destroy_cq(txq->cq));
-	if (txq->rd != NULL) {
-		struct ibv_exp_destroy_res_domain_attr attr = {
-			.comp_mask = 0,
-		};
+	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
+}
 
-		assert(txq->priv != NULL);
-		assert(txq->priv->ctx != NULL);
-		claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
-						      txq->rd,
-						      &attr));
+/**
+ * Initialize TX queue.
+ *
+ * @param tmpl
+ *   Pointer to TX queue control template.
+ * @param txq_ctrl
+ *   Pointer to TX queue control.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static inline int
+txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
+{
+	struct mlx5dv_qp qp;
+	struct ibv_cq *ibcq = tmpl->cq;
+	struct mlx5dv_cq cq_info;
+	struct mlx5dv_obj obj;
+	int ret = 0;
+
+	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
+	obj.cq.in = ibcq;
+	obj.cq.out = &cq_info;
+	obj.qp.in = tmpl->qp;
+	obj.qp.out = &qp;
+	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+	if (ret != 0) {
+		return -EINVAL;
 	}
-	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-		if (txq->mp2mr[i].mp == NULL)
-			break;
-		assert(txq->mp2mr[i].mr != NULL);
-		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
+		return EINVAL;
 	}
-	memset(txq, 0, sizeof(*txq));
+	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
+	tmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8;
+	tmpl->txq.wqes = qp.sq.buf;
+	tmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt);
+	tmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR];
+	tmpl->txq.bf_reg = qp.bf.reg;
+	tmpl->txq.cq_db = cq_info.dbrec;
+	tmpl->txq.cqes =
+		(volatile struct mlx5_cqe (*)[])
+		(uintptr_t)cq_info.buf;
+	tmpl->txq.elts =
+		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
+		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
+	if (qp.comp_mask | MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+		tmpl->uar_mmap_offset = qp.uar_mmap_offset;
+	} else {
+		ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+		return EINVAL;
+	}
+
+	return 0;
 }
 
 /**
@@ -238,7 +212,7 @@ txq_cleanup(struct txq *txq)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param txq
+ * @param txq_ctrl
  *   Pointer to TX queue structure.
  * @param desc
  *   Number of descriptors to configure in queue.
@@ -250,51 +224,49 @@ txq_cleanup(struct txq *txq) * @return * 0 on success, errno value on failure. */ -static int -txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, - unsigned int socket, const struct rte_eth_txconf *conf) +int +mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf) { - struct priv *priv = dev->data->dev_private; - struct txq tmpl = { + struct priv *priv = mlx5_get_priv(dev); + struct mlx5_txq_ctrl tmpl = { .priv = priv, - .socket = socket + .socket = socket, }; union { - struct ibv_exp_query_intf_params params; - struct ibv_exp_qp_init_attr init; - struct ibv_exp_res_domain_init_attr rd; - struct ibv_exp_cq_init_attr cq; - struct ibv_exp_qp_attr mod; + struct ibv_qp_init_attr_ex init; + struct ibv_cq_init_attr_ex cq; + struct ibv_qp_attr mod; + struct ibv_cq_ex cq_attr; } attr; - enum ibv_exp_query_intf_status status; + unsigned int cqe_n; + const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER + + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE); int ret = 0; - (void)conf; /* Thresholds configuration (ignored). */ - if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { - ERROR("%p: invalid number of TX descriptors (must be a" - " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); - return EINVAL; - } - desc /= MLX5_PMD_SGE_WR_N; - /* MRs will be registered in mp2mr[] later. */ - attr.rd = (struct ibv_exp_res_domain_init_attr){ - .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | - IBV_EXP_RES_DOMAIN_MSG_MODEL), - .thread_model = IBV_EXP_THREAD_SINGLE, - .msg_model = IBV_EXP_MSG_HIGH_BW, - }; - tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); - if (tmpl.rd == NULL) { - ret = ENOMEM; - ERROR("%p: RD creation failure: %s", - (void *)dev, strerror(ret)); + if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { + ret = ENOTSUP; + ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); goto error; } - attr.cq = (struct ibv_exp_cq_init_attr){ - .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, - .res_domain = tmpl.rd, + tmpl.txq.flags = conf->txq_flags; + assert(desc > MLX5_TX_COMP_THRESH); + tmpl.txq.elts_n = log2above(desc); + if (priv->mps == MLX5_MPW_ENHANCED) + tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg; + /* MRs will be registered in mp2mr[] later. */ + attr.cq = (struct ibv_cq_init_attr_ex){ + .comp_mask = 0, }; - tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq); + cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ? + ((desc / MLX5_TX_COMP_THRESH) - 1) : 1; + if (priv->mps == MLX5_MPW_ENHANCED) + cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; + tmpl.cq = ibv_create_cq(priv->ctx, + cqe_n, + NULL, NULL, 0); if (tmpl.cq == NULL) { ret = ENOMEM; ERROR("%p: CQ creation failure: %s", @@ -302,116 +274,163 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, goto error; } DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.max_qp_wr); + priv->device_attr.orig_attr.max_qp_wr); DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.max_sge); - attr.init = (struct ibv_exp_qp_init_attr){ + priv->device_attr.orig_attr.max_sge); + attr.init = (struct ibv_qp_init_attr_ex){ /* CQ to be associated with the send queue. */ .send_cq = tmpl.cq, /* CQ to be associated with the receive queue. */ .recv_cq = tmpl.cq, .cap = { /* Max number of outstanding WRs. */ - .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ? - priv->device_attr.max_qp_wr : - desc), - /* Max number of scatter/gather elements in a WR. 
*/ - .max_send_sge = ((priv->device_attr.max_sge < - MLX5_PMD_SGE_WR_N) ? - priv->device_attr.max_sge : - MLX5_PMD_SGE_WR_N), -#if MLX5_PMD_MAX_INLINE > 0 - .max_inline_data = MLX5_PMD_MAX_INLINE, -#endif + .max_send_wr = + ((priv->device_attr.orig_attr.max_qp_wr < desc) ? + priv->device_attr.orig_attr.max_qp_wr : + desc), + /* + * Max number of scatter/gather elements in a WR, + * must be 1 to prevent libmlx5 from trying to affect + * too much memory. TX gather is not impacted by the + * priv->device_attr.max_sge limit and will still work + * properly. + */ + .max_send_sge = 1, }, .qp_type = IBV_QPT_RAW_PACKET, /* Do *NOT* enable this, completions events are managed per * TX burst. */ .sq_sig_all = 0, .pd = priv->pd, - .res_domain = tmpl.rd, - .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | - IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), + .comp_mask = IBV_QP_INIT_ATTR_PD, }; - tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); + if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) { + unsigned int ds_cnt; + + tmpl.txq.max_inline = + ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE); + tmpl.txq.inline_en = 1; + /* TSO and MPS can't be enabled concurrently. */ + assert(!priv->tso || !priv->mps); + if (priv->mps == MLX5_MPW_ENHANCED) { + tmpl.txq.inline_max_packet_sz = + priv->inline_max_packet_sz; + /* To minimize the size of data set, avoid requesting + * too large WQ. + */ + attr.init.cap.max_inline_data = + ((RTE_MIN(priv->txq_inline, + priv->inline_max_packet_sz) + + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; + } else if (priv->tso) { + int inline_diff = tmpl.txq.max_inline - max_tso_inline; + + /* + * Adjust inline value as Verbs aggregates + * tso_inline and txq_inline fields. + */ + attr.init.cap.max_inline_data = inline_diff > 0 ? + inline_diff * + RTE_CACHE_LINE_SIZE : + 0; + } else { + attr.init.cap.max_inline_data = + tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; + } + /* + * Check if the inline size is too large in a way which + * can make the WQE DS to overflow. + * Considering in calculation: + * WQE CTRL (1 DS) + * WQE ETH (1 DS) + * Inline part (N DS) + */ + ds_cnt = 2 + + (attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE); + if (ds_cnt > MLX5_DSEG_MAX) { + unsigned int max_inline = (MLX5_DSEG_MAX - 2) * + MLX5_WQE_DWORD_SIZE; + + max_inline = max_inline - (max_inline % + RTE_CACHE_LINE_SIZE); + WARN("txq inline is too large (%d) setting it to " + "the maximum possible: %d\n", + priv->txq_inline, max_inline); + tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE; + attr.init.cap.max_inline_data = max_inline; + } + } + if (priv->tso) { + attr.init.max_tso_header = + max_tso_inline * RTE_CACHE_LINE_SIZE; + attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER; + tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline, + max_tso_inline); + tmpl.txq.tso_en = 1; + } + if (priv->tunnel_en) + tmpl.txq.tunnel_en = 1; + tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init); if (tmpl.qp == NULL) { ret = (errno ? errno : EINVAL); ERROR("%p: QP creation failure: %s", (void *)dev, strerror(ret)); goto error; } -#if MLX5_PMD_MAX_INLINE > 0 - /* ibv_create_qp() updates this value. */ - tmpl.max_inline = attr.init.cap.max_inline_data; -#endif - attr.mod = (struct ibv_exp_qp_attr){ + DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u," + " max_inline_data=%u", + attr.init.cap.max_send_wr, + attr.init.cap.max_send_sge, + attr.init.cap.max_inline_data); + attr.mod = (struct ibv_qp_attr){ /* Move the QP to this state. 
*/ .qp_state = IBV_QPS_INIT, /* Primary port number. */ .port_num = priv->port }; - ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, - (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT)); + ret = ibv_modify_qp(tmpl.qp, &attr.mod, + (IBV_QP_STATE | IBV_QP_PORT)); if (ret) { ERROR("%p: QP state to IBV_QPS_INIT failed: %s", (void *)dev, strerror(ret)); goto error; } - ret = txq_alloc_elts(&tmpl, desc); + ret = txq_setup(&tmpl, txq_ctrl); if (ret) { - ERROR("%p: TXQ allocation failed: %s", + ERROR("%p: cannot initialize TX queue structure: %s", (void *)dev, strerror(ret)); goto error; } - attr.mod = (struct ibv_exp_qp_attr){ + txq_alloc_elts(&tmpl, desc); + attr.mod = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR }; - ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTR failed: %s", (void *)dev, strerror(ret)); goto error; } attr.mod.qp_state = IBV_QPS_RTS; - ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTS failed: %s", (void *)dev, strerror(ret)); goto error; } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_CQ, - .obj = tmpl.cq, - }; - tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_cq == NULL) { - ret = EINVAL; - ERROR("%p: CQ interface family query failed with status %d", - (void *)dev, status); - goto error; - } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_QP_BURST, - .obj = tmpl.qp, - }; - tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_qp == NULL) { - ret = EINVAL; - ERROR("%p: QP interface family query failed with status %d", - (void *)dev, status); - goto error; - } /* Clean up txq in case we're reinitializing it. */ - DEBUG("%p: cleaning-up old txq just in case", (void *)txq); - txq_cleanup(txq); - *txq = tmpl; - DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl); + DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl); + mlx5_txq_cleanup(txq_ctrl); + *txq_ctrl = tmpl; + DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl); + /* Pre-register known mempools. 
*/ + rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl); assert(ret == 0); return 0; error: - txq_cleanup(&tmpl); + mlx5_txq_cleanup(&tmpl); assert(ret > 0); return ret; } @@ -438,10 +457,28 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf) { struct priv *priv = dev->data->dev_private; - struct txq *txq = (*priv->txqs)[idx]; + struct mlx5_txq_data *txq = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); int ret; + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + if (desc <= MLX5_TX_COMP_THRESH) { + WARN("%p: number of descriptors requested for TX queue %u" + " must be higher than MLX5_TX_COMP_THRESH, using" + " %u instead of %u", + (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); + desc = MLX5_TX_COMP_THRESH + 1; + } + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + WARN("%p: increased number of descriptors in TX queue %u" + " to the next power of two (%d)", + (void *)dev, idx, desc); + } DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); if (idx >= priv->txqs_n) { @@ -453,30 +490,46 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (txq != NULL) { DEBUG("%p: reusing already allocated queue index %u (%p)", (void *)dev, idx, (void *)txq); - if (priv->started) { + if (dev->data->dev_started) { priv_unlock(priv); return -EEXIST; } (*priv->txqs)[idx] = NULL; - txq_cleanup(txq); + mlx5_txq_cleanup(txq_ctrl); + /* Resize if txq size is changed. */ + if (txq_ctrl->txq.elts_n != log2above(desc)) { + txq_ctrl = rte_realloc(txq_ctrl, + sizeof(*txq_ctrl) + + desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE); + if (!txq_ctrl) { + ERROR("%p: unable to reallocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } } else { - txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket); - if (txq == NULL) { + txq_ctrl = + rte_calloc_socket("TXQ", 1, + sizeof(*txq_ctrl) + + desc * sizeof(struct rte_mbuf *), + 0, socket); + if (txq_ctrl == NULL) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); priv_unlock(priv); return -ENOMEM; } } - ret = txq_setup(dev, txq, desc, socket, conf); + ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf); if (ret) - rte_free(txq); + rte_free(txq_ctrl); else { + txq_ctrl->txq.stats.idx = idx; DEBUG("%p: adding TX queue %p to list", - (void *)dev, (void *)txq); - (*priv->txqs)[idx] = txq; - /* Update send callback. 
*/ - dev->tx_pkt_burst = mlx5_tx_burst; + (void *)dev, (void *)txq_ctrl); + (*priv->txqs)[idx] = &txq_ctrl->txq; } priv_unlock(priv); return -ret; @@ -491,22 +544,83 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, void mlx5_tx_queue_release(void *dpdk_txq) { - struct txq *txq = (struct txq *)dpdk_txq; + struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq; + struct mlx5_txq_ctrl *txq_ctrl; struct priv *priv; unsigned int i; + if (mlx5_is_secondary()) + return; + if (txq == NULL) return; - priv = txq->priv; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + priv = txq_ctrl->priv; priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", - (void *)priv->dev, (void *)txq); + (void *)priv->dev, (void *)txq_ctrl); (*priv->txqs)[i] = NULL; break; } - txq_cleanup(txq); - rte_free(txq); + mlx5_txq_cleanup(txq_ctrl); + rte_free(txq_ctrl); priv_unlock(priv); } + + +/** + * Map locally UAR used in Tx queues for BlueFlame doorbell. + * + * @param[in] priv + * Pointer to private structure. + * @param fd + * Verbs file descriptor to map UAR pages. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_tx_uar_remap(struct priv *priv, int fd) +{ + unsigned int i, j; + uintptr_t pages[priv->txqs_n]; + unsigned int pages_n = 0; + uintptr_t uar_va; + void *addr; + struct mlx5_txq_data *txq; + struct mlx5_txq_ctrl *txq_ctrl; + int already_mapped; + size_t page_size = sysconf(_SC_PAGESIZE); + + /* + * As rdma-core, UARs are mapped in size of OS page size. + * Use aligned address to avoid duplicate mmap. + * Ref to libmlx5 function: mlx5_init_context() + */ + for (i = 0; i != priv->txqs_n; ++i) { + txq = (*priv->txqs)[i]; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + uar_va = (uintptr_t)txq_ctrl->txq.bf_reg; + uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); + already_mapped = 0; + for (j = 0; j != pages_n; ++j) { + if (pages[j] == uar_va) { + already_mapped = 1; + break; + } + } + if (already_mapped) + continue; + pages[pages_n++] = uar_va; + addr = mmap((void *)uar_va, page_size, + PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, + txq_ctrl->uar_mmap_offset); + if (addr != (void *)uar_va) { + ERROR("call to mmap failed on UAR for txq %d\n", i); + return -1; + } + } + return 0; +}