diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4f17fb0f9f..f551f87fa6 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -36,35 +36,29 @@
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
 
 #include "mlx5_utils.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
 
 /**
  * Allocate TX queue elements.
@@ -75,14 +69,16 @@
  *   Number of elements to allocate.
  */
 static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n)
 {
 	unsigned int i;
 
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
-	for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
-		volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];
+	for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
+		volatile struct mlx5_wqe64 *wqe =
+			(volatile struct mlx5_wqe64 *)
+			txq_ctrl->txq.wqes + i;
 
 		memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
 	}
@@ -99,11 +95,12 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
  *   Pointer to TX queue structure.
  */
 static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
-	unsigned int elts_n = txq_ctrl->txq.elts_n;
-	unsigned int elts_head = txq_ctrl->txq.elts_head;
-	unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
+	const uint16_t elts_m = elts_n - 1;
+	uint16_t elts_head = txq_ctrl->txq.elts_head;
+	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
@@ -112,18 +109,17 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
 	txq_ctrl->txq.elts_comp = 0;
 
 	while (elts_tail != elts_head) {
-		struct rte_mbuf *elt = (*elts)[elts_tail];
+		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
 		assert(elt != NULL);
-		rte_pktmbuf_free(elt);
+		rte_pktmbuf_free_seg(elt);
 #ifndef NDEBUG
 		/* Poisoning. */
-		memset(&(*elts)[elts_tail],
+		memset(&(*elts)[elts_tail & elts_m],
 		       0x77,
-		       sizeof((*elts)[elts_tail]));
+		       sizeof((*elts)[elts_tail & elts_m]));
 #endif
-		if (++elts_tail == elts_n)
-			elts_tail = 0;
+		++elts_tail;
 	}
 }
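The masked-index scheme introduced above relies on the ring size being a power
of two: elts_head and elts_tail become free-running 16-bit counters, only
dereferences apply the elts_m mask, and unsigned wraparound keeps the
head/tail distance correct. A minimal standalone sketch of the idea
(illustrative values, not driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint16_t elts_n = 1 << 9;     /* ring size, power of two */
	const uint16_t elts_m = elts_n - 1; /* index mask */
	uint16_t head = 3;                  /* free-running counters... */
	uint16_t tail = 65534;              /* ...about to wrap around */

	while (tail != head) {
		printf("physical slot %u\n", tail & elts_m);
		++tail; /* never reset to 0; uint16_t wraps naturally */
	}
	return 0;
}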
@@ -136,54 +132,19 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
  *   Pointer to TX queue structure.
  */
 void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
 {
-	struct ibv_exp_release_intf_params params;
 	size_t i;
 
 	DEBUG("cleaning up %p", (void *)txq_ctrl);
 	txq_free_elts(txq_ctrl);
-	if (txq_ctrl->if_qp != NULL) {
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		assert(txq_ctrl->qp != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
-						txq_ctrl->if_qp,
-						&params));
-	}
-	if (txq_ctrl->if_cq != NULL) {
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		assert(txq_ctrl->cq != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
-						txq_ctrl->if_cq,
-						&params));
-	}
 	if (txq_ctrl->qp != NULL)
 		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
 	if (txq_ctrl->cq != NULL)
 		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
-	if (txq_ctrl->rd != NULL) {
-		struct ibv_exp_destroy_res_domain_attr attr = {
-			.comp_mask = 0,
-		};
-
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
-						      txq_ctrl->rd,
-						      &attr));
-	}
 	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
-		if (txq_ctrl->txq.mp2mr[i].mp == NULL)
+		if (txq_ctrl->txq.mp2mr[i].mr == NULL)
 			break;
-		assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
 		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
 	}
 	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
@@ -201,34 +162,48 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)
  *   0 on success, errno value on failure.
  */
 static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
 {
-	struct mlx5_qp *qp = to_mqp(tmpl->qp);
+	struct mlx5dv_qp qp;
 	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct mlx5dv_cq cq_info;
+	struct mlx5dv_obj obj;
+	int ret = 0;
 
-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
+	obj.cq.in = ibcq;
+	obj.cq.out = &cq_info;
+	obj.qp.in = tmpl->qp;
+	obj.qp.out = &qp;
+	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+	if (ret != 0) {
+		return EINVAL;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
 		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
 		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		return EINVAL;
 	}
-	tmpl->txq.cqe_n = ibcq->cqe + 1;
-	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
-	tmpl->txq.wqes =
-		(volatile union mlx5_wqe (*)[])
-		(uintptr_t)qp->gen_data.sqstart;
-	tmpl->txq.wqe_n = qp->sq.wqe_cnt;
-	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
-	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-	tmpl->txq.bf_offset = qp->gen_data.bf->offset;
-	tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
-	tmpl->txq.cq_db = cq->dbrec;
+	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
+	tmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8;
+	tmpl->txq.wqes = qp.sq.buf;
+	tmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt);
+	tmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR];
+	tmpl->txq.bf_reg = qp.bf.reg;
+	tmpl->txq.cq_db = cq_info.dbrec;
 	tmpl->txq.cqes =
 		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
+		(uintptr_t)cq_info.buf;
 	tmpl->txq.elts =
-		(struct rte_mbuf *(*)[tmpl->txq.elts_n])
+		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
 		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
+	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+		tmpl->uar_mmap_offset = qp.uar_mmap_offset;
+	} else {
+		ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+		return EINVAL;
+	}
+	return 0;
 }
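The rewritten txq_setup() obtains the raw CQ/QP layout through rdma-core's
direct verbs instead of the removed ibv_exp interfaces. The query pattern is
worth seeing in isolation: fill a mlx5dv_obj with in/out pairs and resolve
them in a single call. A condensed sketch (error handling trimmed; assumes
rdma-core's infiniband/mlx5dv.h):

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Resolve the low-level layout of a CQ and a QP in one call. */
static int
query_txq_layout(struct ibv_cq *cq, struct ibv_qp *qp,
		 struct mlx5dv_cq *cq_info, struct mlx5dv_qp *qp_info)
{
	struct mlx5dv_obj obj;

	/* Ask for the UAR mmap offset in addition to the defaults. */
	qp_info->comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
	obj.cq.in = cq;
	obj.cq.out = cq_info;
	obj.qp.in = qp;
	obj.qp.out = qp_info;
	return mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
}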
@@ -250,24 +225,25 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
  *   0 on success, errno value on failure.
  */
 int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
-	       uint16_t desc, unsigned int socket,
-	       const struct rte_eth_txconf *conf)
+mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl,
+		    uint16_t desc, unsigned int socket,
+		    const struct rte_eth_txconf *conf)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct txq_ctrl tmpl = {
+	struct mlx5_txq_ctrl tmpl = {
 		.priv = priv,
 		.socket = socket,
 	};
 	union {
-		struct ibv_exp_query_intf_params params;
-		struct ibv_exp_qp_init_attr init;
-		struct ibv_exp_res_domain_init_attr rd;
-		struct ibv_exp_cq_init_attr cq;
-		struct ibv_exp_qp_attr mod;
-		struct ibv_exp_cq_attr cq_attr;
+		struct ibv_qp_init_attr_ex init;
+		struct ibv_cq_init_attr_ex cq;
+		struct ibv_qp_attr mod;
+		struct ibv_cq_ex cq_attr;
 	} attr;
-	enum ibv_exp_query_intf_status status;
+	unsigned int cqe_n;
+	const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+					      (RTE_CACHE_LINE_SIZE - 1)) /
+					      RTE_CACHE_LINE_SIZE);
 	int ret = 0;
 
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -275,31 +251,22 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
 		goto error;
 	}
-	(void)conf; /* Thresholds configuration (ignored). */
+	tmpl.txq.flags = conf->txq_flags;
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl.txq.elts_n = desc;
+	tmpl.txq.elts_n = log2above(desc);
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
 	/* MRs will be registered in mp2mr[] later. */
-	attr.rd = (struct ibv_exp_res_domain_init_attr){
-		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
-			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
-		.thread_model = IBV_EXP_THREAD_SINGLE,
-		.msg_model = IBV_EXP_MSG_HIGH_BW,
+	attr.cq = (struct ibv_cq_init_attr_ex){
+		.comp_mask = 0,
 	};
-	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
-	if (tmpl.rd == NULL) {
-		ret = ENOMEM;
-		ERROR("%p: RD creation failure: %s",
-		      (void *)dev, strerror(ret));
-		goto error;
-	}
-	attr.cq = (struct ibv_exp_cq_init_attr){
-		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
-		.res_domain = tmpl.rd,
-	};
-	tmpl.cq = ibv_exp_create_cq(priv->ctx,
-				    (((desc / MLX5_TX_COMP_THRESH) - 1) ?
-				     ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
-				    NULL, NULL, 0, &attr.cq);
+	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
+	tmpl.cq = ibv_create_cq(priv->ctx,
+				cqe_n,
+				NULL, NULL, 0);
 	if (tmpl.cq == NULL) {
 		ret = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
@@ -307,20 +274,27 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
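The CQ is sized for one completion per MLX5_TX_COMP_THRESH descriptors
rather than one per descriptor. Worked numbers, assuming
MLX5_TX_COMP_THRESH == 32 as defined in mlx5_defs.h of this period (the
assert above guarantees desc > MLX5_TX_COMP_THRESH):

/*
 *   desc = 512: cqe_n = 512 / 32 - 1 = 15
 *   desc = 256: cqe_n = 256 / 32 - 1 = 7
 *   desc =  48: 48 / 32 - 1 == 0, so the ?: keeps at least one CQE
 *
 * With enhanced MPW, MLX5_TX_COMP_THRESH_INLINE_DIV extra CQEs are
 * reserved as headroom for inlined multi-packet sends.
 */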
 	DEBUG("priv->device_attr.max_qp_wr is %d",
-	      priv->device_attr.max_qp_wr);
+	      priv->device_attr.orig_attr.max_qp_wr);
 	DEBUG("priv->device_attr.max_sge is %d",
-	      priv->device_attr.max_sge);
-	attr.init = (struct ibv_exp_qp_init_attr){
+	      priv->device_attr.orig_attr.max_sge);
+	attr.init = (struct ibv_qp_init_attr_ex){
 		/* CQ to be associated with the send queue. */
 		.send_cq = tmpl.cq,
 		/* CQ to be associated with the receive queue. */
 		.recv_cq = tmpl.cq,
 		.cap = {
 			/* Max number of outstanding WRs. */
-			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
-					priv->device_attr.max_qp_wr :
-					desc),
-			/* Max number of scatter/gather elements in a WR. */
+			.max_send_wr =
+				((priv->device_attr.orig_attr.max_qp_wr < desc) ?
+				 priv->device_attr.orig_attr.max_qp_wr :
+				 desc),
+			/*
+			 * Max number of scatter/gather elements in a WR,
+			 * must be 1 to prevent libmlx5 from trying to affect
+			 * too much memory. TX gather is not impacted by the
+			 * priv->device_attr.max_sge limit and will still work
+			 * properly.
+			 */
 			.max_send_sge = 1,
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
@@ -328,29 +302,96 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		 * TX burst. */
 		.sq_sig_all = 0,
 		.pd = priv->pd,
-		.res_domain = tmpl.rd,
-		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
-			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+		.comp_mask = IBV_QP_INIT_ATTR_PD,
 	};
-	if (priv->txq_inline && priv->txqs_n >= priv->txqs_inline) {
-		tmpl.txq.max_inline = priv->txq_inline;
-		attr.init.cap.max_inline_data = tmpl.txq.max_inline;
+	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+		unsigned int ds_cnt;
+
+		tmpl.txq.max_inline =
+			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+			 RTE_CACHE_LINE_SIZE);
+		tmpl.txq.inline_en = 1;
+		/* TSO and MPS can't be enabled concurrently. */
+		assert(!priv->tso || !priv->mps);
+		if (priv->mps == MLX5_MPW_ENHANCED) {
+			tmpl.txq.inline_max_packet_sz =
+				priv->inline_max_packet_sz;
+			/* To minimize the size of the data set, avoid
+			 * requesting too large a WQ.
+			 */
+			attr.init.cap.max_inline_data =
+				((RTE_MIN(priv->txq_inline,
+					  priv->inline_max_packet_sz) +
+				  (RTE_CACHE_LINE_SIZE - 1)) /
+				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+		} else if (priv->tso) {
+			int inline_diff = tmpl.txq.max_inline - max_tso_inline;
+
+			/*
+			 * Adjust the inline value since Verbs aggregates the
+			 * tso_inline and txq_inline fields.
+			 */
+			attr.init.cap.max_inline_data = inline_diff > 0 ?
+							inline_diff *
+							RTE_CACHE_LINE_SIZE :
+							0;
+		} else {
+			attr.init.cap.max_inline_data =
+				tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+		}
+		/*
+		 * Check whether the inline size is so large that the WQE DS
+		 * count would overflow. The calculation accounts for:
+		 *	WQE CTRL (1 DS)
+		 *	WQE ETH  (1 DS)
+		 *	Inline part (N DS)
+		 */
+		ds_cnt = 2 +
+			(attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE);
+		if (ds_cnt > MLX5_DSEG_MAX) {
+			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+						  MLX5_WQE_DWORD_SIZE;
+
+			max_inline = max_inline - (max_inline %
+						   RTE_CACHE_LINE_SIZE);
+			WARN("txq inline is too large (%d), setting it to "
+			     "the maximum possible: %d",
			     priv->txq_inline, max_inline);
+			tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+			attr.init.cap.max_inline_data = max_inline;
+		}
 	}
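The DS overflow clamp above can be followed numerically. Assuming
MLX5_DSEG_MAX == 63, MLX5_WQE_DWORD_SIZE == 16 and 64-byte cache lines (the
values in mlx5_defs.h and mlx5_prm.h of this period), a txq_inline of 1024
bytes on the plain (non-MPW, non-TSO) path works out as:

/*
 *   tmpl.txq.max_inline = (1024 + 63) / 64 = 17 cache lines
 *   max_inline_data     = 17 * 64          = 1088 bytes
 *   ds_cnt              = 2 + 1088 / 16    = 70   -> exceeds 63
 *   clamp               = (63 - 2) * 16    = 976
 *   rounded down to a cache-line multiple  = 960
 *   final: max_inline_data = 960, tmpl.txq.max_inline = 960 / 64 = 15
 */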
-	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+	if (priv->tso) {
+		attr.init.max_tso_header =
+			max_tso_inline * RTE_CACHE_LINE_SIZE;
+		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
+		tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+					      max_tso_inline);
+		tmpl.txq.tso_en = 1;
+	}
+	if (priv->tunnel_en)
+		tmpl.txq.tunnel_en = 1;
+	tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
 		ret = (errno ? errno : EINVAL);
 		ERROR("%p: QP creation failure: %s",
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
-	attr.mod = (struct ibv_exp_qp_attr){
+	DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
+	      " max_inline_data=%u",
+	      attr.init.cap.max_send_wr,
+	      attr.init.cap.max_send_sge,
+	      attr.init.cap.max_inline_data);
+	attr.mod = (struct ibv_qp_attr){
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
 		.port_num = priv->port
 	};
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
-				(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod,
+			    (IBV_QP_STATE | IBV_QP_PORT));
 	if (ret) {
 		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
 		      (void *)dev, strerror(ret));
@@ -363,63 +404,33 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		goto error;
 	}
 	txq_alloc_elts(&tmpl, desc);
-	attr.mod = (struct ibv_exp_qp_attr){
+	attr.mod = (struct ibv_qp_attr){
 		.qp_state = IBV_QPS_RTR
 	};
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
 		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
 	attr.mod.qp_state = IBV_QPS_RTS;
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
 		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
-	attr.params = (struct ibv_exp_query_intf_params){
-		.intf_scope = IBV_EXP_INTF_GLOBAL,
-		.intf = IBV_EXP_INTF_CQ,
-		.obj = tmpl.cq,
-	};
-	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
-	if (tmpl.if_cq == NULL) {
-		ret = EINVAL;
-		ERROR("%p: CQ interface family query failed with status %d",
-		      (void *)dev, status);
-		goto error;
-	}
-	attr.params = (struct ibv_exp_query_intf_params){
-		.intf_scope = IBV_EXP_INTF_GLOBAL,
-		.intf = IBV_EXP_INTF_QP_BURST,
-		.intf_version = 1,
-		.obj = tmpl.qp,
-		/* Enable multi-packet send if supported. */
-		.family_flags =
-			((priv->mps && !priv->sriov) ?
-			 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
-			 0),
-	};
-	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
-	if (tmpl.if_qp == NULL) {
-		ret = EINVAL;
-		ERROR("%p: QP interface family query failed with status %d",
-		      (void *)dev, status);
-		goto error;
-	}
 	/* Clean up txq in case we're reinitializing it. */
 	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
-	txq_cleanup(txq_ctrl);
+	mlx5_txq_cleanup(txq_ctrl);
 	*txq_ctrl = tmpl;
 	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
 	/* Pre-register known mempools. */
-	rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
+	rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
 	assert(ret == 0);
 	return 0;
 error:
-	txq_cleanup(&tmpl);
+	mlx5_txq_cleanup(&tmpl);
 	assert(ret > 0);
 	return ret;
 }
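A raw packet QP only needs qp_state (plus port_num for INIT) on each
transition, unlike connected QP types. The bring-up sequence used above,
reduced to its skeleton (a sketch, not the driver's API):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Walk a raw packet QP through RESET -> INIT -> RTR -> RTS. */
static int
raw_qp_to_rts(struct ibv_qp *qp, uint8_t port)
{
	struct ibv_qp_attr mod = {
		.qp_state = IBV_QPS_INIT,
		.port_num = port,
	};
	int ret;

	ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret)
		return ret;
	mod.qp_state = IBV_QPS_RTR;
	ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE);
	if (ret)
		return ret;
	mod.qp_state = IBV_QPS_RTS;
	return ibv_modify_qp(qp, &mod, IBV_QP_STATE);
}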
@@ -446,8 +457,9 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		    unsigned int socket, const struct rte_eth_txconf *conf)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct txq *txq = (*priv->txqs)[idx];
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
 	int ret;
 
 	if (mlx5_is_secondary())
@@ -478,12 +490,25 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (txq != NULL) {
 		DEBUG("%p: reusing already allocated queue index %u (%p)",
 		      (void *)dev, idx, (void *)txq);
-		if (priv->started) {
+		if (dev->data->dev_started) {
 			priv_unlock(priv);
 			return -EEXIST;
 		}
 		(*priv->txqs)[idx] = NULL;
-		txq_cleanup(txq_ctrl);
+		mlx5_txq_cleanup(txq_ctrl);
+		/* Resize if txq size is changed. */
+		if (txq_ctrl->txq.elts_n != log2above(desc)) {
+			txq_ctrl = rte_realloc(txq_ctrl,
+					       sizeof(*txq_ctrl) +
+					       desc * sizeof(struct rte_mbuf *),
+					       RTE_CACHE_LINE_SIZE);
+			if (!txq_ctrl) {
+				ERROR("%p: unable to reallocate queue index %u",
+				      (void *)dev, idx);
+				priv_unlock(priv);
+				return -ENOMEM;
+			}
+		}
 	} else {
 		txq_ctrl =
 			rte_calloc_socket("TXQ", 1,
@@ -497,7 +522,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -ENOMEM;
 		}
 	}
-	ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
+	ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
 	if (ret)
 		rte_free(txq_ctrl);
 	else {
@@ -505,8 +530,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		DEBUG("%p: adding TX queue %p to list",
 		      (void *)dev, (void *)txq_ctrl);
 		(*priv->txqs)[idx] = &txq_ctrl->txq;
-		/* Update send callback. */
-		priv_select_tx_function(priv);
 	}
 	priv_unlock(priv);
 	return -ret;
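Because elts_n now stores the log2 of the ring size, the resize test above
compares log2above(desc) against the stored exponent, rounding any desc that
is not a power of two up to the next one. An illustrative reimplementation of
the helper (the real one lives in mlx5_utils.h):

#include <stdio.h>

/* Return ceil(log2(v)): the exponent of the enclosing power of two. */
static unsigned int
log2above_sketch(unsigned int v)
{
	unsigned int l;
	int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	printf("%u %u %u\n",
	       log2above_sketch(512),  /* 9: already a power of two */
	       log2above_sketch(513),  /* 10: rounded up */
	       log2above_sketch(1));   /* 0 */
	return 0;
}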
@@ -521,8 +544,8 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 void
 mlx5_tx_queue_release(void *dpdk_txq)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
-	struct txq_ctrl *txq_ctrl;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	struct priv *priv;
 	unsigned int i;
 
@@ -531,7 +554,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	if (txq == NULL)
 		return;
-	txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 	priv = txq_ctrl->priv;
 	priv_lock(priv);
 	for (i = 0; (i != priv->txqs_n); ++i)
@@ -541,48 +564,63 @@ mlx5_tx_queue_release(void *dpdk_txq)
 			(*priv->txqs)[i] = NULL;
 			break;
 		}
-	txq_cleanup(txq_ctrl);
+	mlx5_txq_cleanup(txq_ctrl);
 	rte_free(txq_ctrl);
 	priv_unlock(priv);
 }
 
+
 /**
- * DPDK callback for TX in secondary processes.
- *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal TX burst callback.
+ * Locally map the UAR pages used by Tx queues for the BlueFlame doorbell.
  *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
+ * @param[in] priv
+ *   Pointer to private structure.
+ * @param fd
+ *   Verbs file descriptor to map UAR pages.
  *
  * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ *   0 on success, -1 on failure.
  */
-uint16_t
-mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
-			      uint16_t pkts_n)
+int
+priv_tx_uar_remap(struct priv *priv, int fd)
 {
-	struct txq *txq = dpdk_txq;
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
-	struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
-	struct priv *primary_priv;
-	unsigned int index;
+	unsigned int i, j;
+	uintptr_t pages[priv->txqs_n];
+	unsigned int pages_n = 0;
+	uintptr_t uar_va;
+	void *addr;
+	struct mlx5_txq_data *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
+	int already_mapped;
+	size_t page_size = sysconf(_SC_PAGESIZE);
 
-	if (priv == NULL)
-		return 0;
-	primary_priv =
-		mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
-	/* Look for queue index in both private structures. */
-	for (index = 0; index != priv->txqs_n; ++index)
-		if (((*primary_priv->txqs)[index] == txq) ||
-		    ((*priv->txqs)[index] == txq))
-			break;
-	if (index == priv->txqs_n)
-		return 0;
-	txq = (*priv->txqs)[index];
-	return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
+	/*
+	 * Like rdma-core, UARs are mapped at OS page size granularity.
+	 * Use the aligned address to avoid duplicate mmap calls.
+	 * See the libmlx5 function mlx5_init_context().
+	 */
+	for (i = 0; i != priv->txqs_n; ++i) {
+		txq = (*priv->txqs)[i];
+		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+		uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
+		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+		already_mapped = 0;
+		for (j = 0; j != pages_n; ++j) {
+			if (pages[j] == uar_va) {
+				already_mapped = 1;
+				break;
+			}
+		}
+		if (already_mapped)
+			continue;
+		pages[pages_n++] = uar_va;
+		addr = mmap((void *)uar_va, page_size,
+			    PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+			    txq_ctrl->uar_mmap_offset);
+		if (addr != (void *)uar_va) {
+			ERROR("call to mmap failed on UAR for txq %u", i);
+			return -1;
+		}
+	}
+	return 0;
 }
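The MAP_FIXED mapping above is what lets a secondary process reuse the
primary's txq->bf_reg pointers unchanged: each distinct UAR page is remapped
at the exact virtual address the primary obtained, using the uar_mmap_offset
reported by mlx5dv. The core of the pattern in isolation (a sketch with
illustrative names, not the driver's API; RTE_ALIGN_FLOOR is replaced by
explicit masking to stay self-contained):

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Remap one UAR page at the same virtual address in this process.
 * bf_reg comes from the primary process, fd is the Verbs device fd,
 * offset is the uar_mmap_offset reported for the queue's QP. */
static int
remap_uar_page(void *bf_reg, int fd, off_t offset)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	uintptr_t uar_va = (uintptr_t)bf_reg & ~((uintptr_t)page_size - 1);
	void *addr;

	addr = mmap((void *)uar_va, page_size, PROT_WRITE,
		    MAP_FIXED | MAP_SHARED, fd, offset);
	return (addr == (void *)uar_va) ? 0 : -1;
}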