X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=9deaa7ea30f7f3b55fb28256e5ee0cb2a1730f43;hb=6a6b6828fe6a08a9ab3cd91c2d21e23312095554;hp=1899850db4b6514f5cb97ee934b36f620cd7998c;hpb=f8fb87d51f0f69a9bb7eee436a331aa93336c9fa;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 1899850db4..9deaa7ea30 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -65,23 +65,15 @@ * * @param txq_ctrl * Pointer to TX queue structure. - * @param elts_n - * Number of elements to allocate. */ -static void -txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n) +void +txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl) { + const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n; unsigned int i; for (i = 0; (i != elts_n); ++i) (*txq_ctrl->txq.elts)[i] = NULL; - for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) { - volatile struct mlx5_wqe64 *wqe = - (volatile struct mlx5_wqe64 *) - txq_ctrl->txq.wqes + i; - - memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe)); - } DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n); txq_ctrl->txq.elts_head = 0; txq_ctrl->txq.elts_tail = 0; @@ -124,114 +116,186 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) } /** - * Clean up a TX queue. + * DPDK callback to configure a TX queue. * - * Destroy objects, free allocated memory and reset the structure for reuse. + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. * - * @param txq_ctrl - * Pointer to TX queue structure. + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + int ret = 0; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + if (desc <= MLX5_TX_COMP_THRESH) { + WARN("%p: number of descriptors requested for TX queue %u" + " must be higher than MLX5_TX_COMP_THRESH, using" + " %u instead of %u", + (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); + desc = MLX5_TX_COMP_THRESH + 1; + } + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + WARN("%p: increased number of descriptors in TX queue %u" + " to the next power of two (%d)", + (void *)dev, idx, desc); + } + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->txqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->txqs_n); + priv_unlock(priv); + return -EOVERFLOW; + } + if (!mlx5_priv_txq_releasable(priv, idx)) { + ret = EBUSY; + ERROR("%p: unable to release queue index %u", + (void *)dev, idx); + goto out; + } + mlx5_priv_txq_release(priv, idx); + txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf); + if (!txq_ctrl) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + ret = ENOMEM; + goto out; + } + DEBUG("%p: adding TX queue %p to list", + (void *)dev, (void *)txq_ctrl); + (*priv->txqs)[idx] = &txq_ctrl->txq; +out: + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to release a TX queue. 
+ * + * @param dpdk_txq + * Generic TX queue pointer. */ void -mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl) +mlx5_tx_queue_release(void *dpdk_txq) { - size_t i; - - DEBUG("cleaning up %p", (void *)txq_ctrl); - txq_free_elts(txq_ctrl); - if (txq_ctrl->qp != NULL) - claim_zero(ibv_destroy_qp(txq_ctrl->qp)); - if (txq_ctrl->cq != NULL) - claim_zero(ibv_destroy_cq(txq_ctrl->cq)); - for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) - if (txq_ctrl->txq.mp2mr[i]) - priv_mr_release(txq_ctrl->priv, txq_ctrl->txq.mp2mr[i]); - memset(txq_ctrl, 0, sizeof(*txq_ctrl)); + struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq; + struct mlx5_txq_ctrl *txq_ctrl; + struct priv *priv; + unsigned int i; + + if (mlx5_is_secondary()) + return; + + if (txq == NULL) + return; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + priv = txq_ctrl->priv; + priv_lock(priv); + for (i = 0; (i != priv->txqs_n); ++i) + if ((*priv->txqs)[i] == txq) { + DEBUG("%p: removing TX queue %p from list", + (void *)priv->dev, (void *)txq_ctrl); + mlx5_priv_txq_release(priv, i); + break; + } + priv_unlock(priv); } + /** - * Initialize TX queue. + * Map locally UAR used in Tx queues for BlueFlame doorbell. * - * @param tmpl - * Pointer to TX queue control template. - * @param txq_ctrl - * Pointer to TX queue control. + * @param[in] priv + * Pointer to private structure. + * @param fd + * Verbs file descriptor to map UAR pages. * * @return * 0 on success, errno value on failure. */ -static inline int -txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl) +int +priv_tx_uar_remap(struct priv *priv, int fd) { - struct mlx5dv_qp qp; - struct ibv_cq *ibcq = tmpl->cq; - struct mlx5dv_cq cq_info; - struct mlx5dv_obj obj; - int ret = 0; + unsigned int i, j; + uintptr_t pages[priv->txqs_n]; + unsigned int pages_n = 0; + uintptr_t uar_va; + void *addr; + struct mlx5_txq_data *txq; + struct mlx5_txq_ctrl *txq_ctrl; + int already_mapped; + size_t page_size = sysconf(_SC_PAGESIZE); - qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET; - obj.cq.in = ibcq; - obj.cq.out = &cq_info; - obj.qp.in = tmpl->qp; - obj.qp.out = &qp; - ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); - if (ret != 0) { - return -EINVAL; - } - if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); - return EINVAL; - } - tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt); - tmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8; - tmpl->txq.wqes = qp.sq.buf; - tmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt); - tmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR]; - tmpl->txq.bf_reg = qp.bf.reg; - tmpl->txq.cq_db = cq_info.dbrec; - tmpl->txq.cqes = - (volatile struct mlx5_cqe (*)[]) - (uintptr_t)cq_info.buf; - tmpl->txq.elts = - (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n]) - ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl)); - if (qp.comp_mask | MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { - tmpl->uar_mmap_offset = qp.uar_mmap_offset; - } else { - ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); - return EINVAL; + /* + * As rdma-core, UARs are mapped in size of OS page size. + * Use aligned address to avoid duplicate mmap. 
+ * Ref to libmlx5 function: mlx5_init_context() + */ + for (i = 0; i != priv->txqs_n; ++i) { + txq = (*priv->txqs)[i]; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + uar_va = (uintptr_t)txq_ctrl->txq.bf_reg; + uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); + already_mapped = 0; + for (j = 0; j != pages_n; ++j) { + if (pages[j] == uar_va) { + already_mapped = 1; + break; + } + } + if (already_mapped) + continue; + pages[pages_n++] = uar_va; + addr = mmap((void *)uar_va, page_size, + PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, + txq_ctrl->uar_mmap_offset); + if (addr != (void *)uar_va) { + ERROR("call to mmap failed on UAR for txq %d\n", i); + return -1; + } } - return 0; } /** - * Configure a TX queue. + * Create the Tx queue Verbs object. * - * @param dev - * Pointer to Ethernet device structure. - * @param txq_ctrl - * Pointer to TX queue structure. - * @param desc - * Number of descriptors to configure in queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param[in] conf - * Thresholds parameters. + * @param priv + * Pointer to private structure. + * @param idx + * Queue index in DPDK Rx queue array * * @return - * 0 on success, errno value on failure. + * The Verbs object initialised if it can be created. */ -int -mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, - uint16_t desc, unsigned int socket, - const struct rte_eth_txconf *conf) +struct mlx5_txq_ibv* +mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) { - struct priv *priv = mlx5_get_priv(dev); - struct mlx5_txq_ctrl tmpl = { - .priv = priv, - .socket = socket, - }; + struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq_data, struct mlx5_txq_ctrl, txq); + struct mlx5_txq_ibv tmpl; + struct mlx5_txq_ibv *txq_ibv; union { struct ibv_qp_init_attr_ex init; struct ibv_cq_init_attr_ex cq; @@ -239,21 +303,18 @@ mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, struct ibv_cq_ex cq_attr; } attr; unsigned int cqe_n; - const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER + - (RTE_CACHE_LINE_SIZE - 1)) / - RTE_CACHE_LINE_SIZE); + struct mlx5dv_qp qp; + struct mlx5dv_cq cq_info; + struct mlx5dv_obj obj; + const int desc = 1 << txq_data->elts_n; int ret = 0; + assert(txq_data); if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { - ret = ENOTSUP; ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); goto error; } - tmpl.txq.flags = conf->txq_flags; - assert(desc > MLX5_TX_COMP_THRESH); - tmpl.txq.elts_n = log2above(desc); - if (priv->mps == MLX5_MPW_ENHANCED) - tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg; + memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv)); /* MRs will be registered in mp2mr[] later. 
*/ attr.cq = (struct ibv_cq_init_attr_ex){ .comp_mask = 0, @@ -262,19 +323,11 @@ mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, ((desc / MLX5_TX_COMP_THRESH) - 1) : 1; if (priv->mps == MLX5_MPW_ENHANCED) cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; - tmpl.cq = ibv_create_cq(priv->ctx, - cqe_n, - NULL, NULL, 0); + tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0); if (tmpl.cq == NULL) { - ret = ENOMEM; - ERROR("%p: CQ creation failure: %s", - (void *)dev, strerror(ret)); + ERROR("%p: CQ creation failure", (void *)txq_ctrl); goto error; } - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); attr.init = (struct ibv_qp_init_attr_ex){ /* CQ to be associated with the send queue. */ .send_cq = tmpl.cq, @@ -283,9 +336,10 @@ mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, .cap = { /* Max number of outstanding WRs. */ .max_send_wr = - ((priv->device_attr.orig_attr.max_qp_wr < desc) ? - priv->device_attr.orig_attr.max_qp_wr : - desc), + ((priv->device_attr.orig_attr.max_qp_wr < + desc) ? + priv->device_attr.orig_attr.max_qp_wr : + desc), /* * Max number of scatter/gather elements in a WR, * must be 1 to prevent libmlx5 from trying to affect @@ -296,329 +350,434 @@ mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl, .max_send_sge = 1, }, .qp_type = IBV_QPT_RAW_PACKET, - /* Do *NOT* enable this, completions events are managed per - * TX burst. */ + /* + * Do *NOT* enable this, completions events are managed per + * Tx burst. + */ .sq_sig_all = 0, .pd = priv->pd, .comp_mask = IBV_QP_INIT_ATTR_PD, }; + if (txq_data->inline_en) + attr.init.cap.max_inline_data = txq_ctrl->max_inline_data; + if (txq_data->tso_en) { + attr.init.max_tso_header = txq_ctrl->max_tso_header; + attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER; + } + tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init); + if (tmpl.qp == NULL) { + ERROR("%p: QP creation failure", (void *)txq_ctrl); + goto error; + } + attr.mod = (struct ibv_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. 
*/ + .port_num = priv->port + }; + ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT)); + if (ret) { + ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl); + goto error; + } + attr.mod = (struct ibv_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl); + goto error; + } + attr.mod.qp_state = IBV_QPS_RTS; + ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl); + goto error; + } + txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0, + txq_ctrl->socket); + if (!txq_ibv) { + ERROR("%p: cannot allocate memory", (void *)txq_ctrl); + goto error; + } + obj.cq.in = tmpl.cq; + obj.cq.out = &cq_info; + obj.qp.in = tmpl.qp; + obj.qp.out = &qp; + ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); + if (ret != 0) + goto error; + if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { + ERROR("Wrong MLX5_CQE_SIZE environment variable value: " + "it should be set to %u", RTE_CACHE_LINE_SIZE); + goto error; + } + txq_data->cqe_n = log2above(cq_info.cqe_cnt); + txq_data->qp_num_8s = tmpl.qp->qp_num << 8; + txq_data->wqes = qp.sq.buf; + txq_data->wqe_n = log2above(qp.sq.wqe_cnt); + txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR]; + txq_data->bf_reg = qp.bf.reg; + txq_data->cq_db = cq_info.dbrec; + txq_data->cqes = + (volatile struct mlx5_cqe (*)[]) + (uintptr_t)cq_info.buf; + txq_data->cq_ci = 0; + txq_data->cq_pi = 0; + txq_data->wqe_ci = 0; + txq_data->wqe_pi = 0; + txq_ibv->qp = tmpl.qp; + txq_ibv->cq = tmpl.cq; + rte_atomic32_inc(&txq_ibv->refcnt); + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); + LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next); + return txq_ibv; +error: + if (tmpl.cq) + claim_zero(ibv_destroy_cq(tmpl.cq)); + if (tmpl.qp) + claim_zero(ibv_destroy_qp(tmpl.qp)); + return NULL; +} + +/** + * Get an Tx queue Verbs object. + * + * @param priv + * Pointer to private structure. + * @param idx + * Queue index in DPDK Rx queue array + * + * @return + * The Verbs object if it exists. + */ +struct mlx5_txq_ibv* +mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) +{ + struct mlx5_txq_ctrl *txq_ctrl; + + if (idx >= priv->txqs_n) + return NULL; + if (!(*priv->txqs)[idx]) + return NULL; + txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + if (txq_ctrl->ibv) { + rte_atomic32_inc(&txq_ctrl->ibv->refcnt); + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + (void *)txq_ctrl->ibv, + rte_atomic32_read(&txq_ctrl->ibv->refcnt)); + } + return txq_ctrl->ibv; +} + +/** + * Release an Tx verbs queue object. + * + * @param priv + * Pointer to private structure. + * @param txq_ibv + * Verbs Tx queue object. + * + * @return + * 0 on success, errno on failure. + */ +int +mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +{ + (void)priv; + assert(txq_ibv); + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); + if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) { + claim_zero(ibv_destroy_qp(txq_ibv->qp)); + claim_zero(ibv_destroy_cq(txq_ibv->cq)); + LIST_REMOVE(txq_ibv, next); + rte_free(txq_ibv); + return 0; + } + return EBUSY; +} + +/** + * Return true if a single reference exists on the object. + * + * @param priv + * Pointer to private structure. + * @param txq_ibv + * Verbs Tx queue object. 
+ */ +int +mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +{ + (void)priv; + assert(txq_ibv); + return (rte_atomic32_read(&txq_ibv->refcnt) == 1); +} + +/** + * Verify the Verbs Tx queue list is empty + * + * @param priv + * Pointer to private structure. + * + * @return the number of object not released. + */ +int +mlx5_priv_txq_ibv_verify(struct priv *priv) +{ + int ret = 0; + struct mlx5_txq_ibv *txq_ibv; + + LIST_FOREACH(txq_ibv, &priv->txqsibv, next) { + DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv, + (void *)txq_ibv); + ++ret; + } + return ret; +} + +/** + * Create a DPDK Tx queue. + * + * @param priv + * Pointer to private structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * A DPDK queue object on success. + */ +struct mlx5_txq_ctrl* +mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_txconf *conf) +{ + const unsigned int max_tso_inline = + ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE); + struct mlx5_txq_ctrl *tmpl; + + tmpl = rte_calloc_socket("TXQ", 1, + sizeof(*tmpl) + + desc * sizeof(struct rte_mbuf *), + 0, socket); + if (!tmpl) + return NULL; + assert(desc > MLX5_TX_COMP_THRESH); + tmpl->txq.flags = conf->txq_flags; + tmpl->priv = priv; + tmpl->txq.elts_n = log2above(desc); + if (priv->mps == MLX5_MPW_ENHANCED) + tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg; + /* MRs will be registered in mp2mr[] later. */ + DEBUG("priv->device_attr.max_qp_wr is %d", + priv->device_attr.orig_attr.max_qp_wr); + DEBUG("priv->device_attr.max_sge is %d", + priv->device_attr.orig_attr.max_sge); if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) { unsigned int ds_cnt; - tmpl.txq.max_inline = + tmpl->txq.max_inline = ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE); - tmpl.txq.inline_en = 1; + tmpl->txq.inline_en = 1; /* TSO and MPS can't be enabled concurrently. */ assert(!priv->tso || !priv->mps); if (priv->mps == MLX5_MPW_ENHANCED) { - tmpl.txq.inline_max_packet_sz = + tmpl->txq.inline_max_packet_sz = priv->inline_max_packet_sz; /* To minimize the size of data set, avoid requesting * too large WQ. */ - attr.init.cap.max_inline_data = + tmpl->max_inline_data = ((RTE_MIN(priv->txq_inline, priv->inline_max_packet_sz) + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; } else if (priv->tso) { - int inline_diff = tmpl.txq.max_inline - max_tso_inline; + int inline_diff = tmpl->txq.max_inline - max_tso_inline; /* * Adjust inline value as Verbs aggregates * tso_inline and txq_inline fields. */ - attr.init.cap.max_inline_data = inline_diff > 0 ? - inline_diff * - RTE_CACHE_LINE_SIZE : - 0; + tmpl->max_inline_data = inline_diff > 0 ? + inline_diff * + RTE_CACHE_LINE_SIZE : + 0; } else { - attr.init.cap.max_inline_data = - tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; + tmpl->max_inline_data = + tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE; } /* * Check if the inline size is too large in a way which * can make the WQE DS to overflow. 
* Considering in calculation: - * WQE CTRL (1 DS) - * WQE ETH (1 DS) - * Inline part (N DS) + * WQE CTRL (1 DS) + * WQE ETH (1 DS) + * Inline part (N DS) */ - ds_cnt = 2 + - (attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE); + ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE); if (ds_cnt > MLX5_DSEG_MAX) { unsigned int max_inline = (MLX5_DSEG_MAX - 2) * - MLX5_WQE_DWORD_SIZE; + MLX5_WQE_DWORD_SIZE; max_inline = max_inline - (max_inline % RTE_CACHE_LINE_SIZE); WARN("txq inline is too large (%d) setting it to " "the maximum possible: %d\n", priv->txq_inline, max_inline); - tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE; - attr.init.cap.max_inline_data = max_inline; + tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE; } } if (priv->tso) { - attr.init.max_tso_header = - max_tso_inline * RTE_CACHE_LINE_SIZE; - attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER; - tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline, - max_tso_inline); - tmpl.txq.tso_en = 1; + tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE; + tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline, + max_tso_inline); + tmpl->txq.tso_en = 1; } if (priv->tunnel_en) - tmpl.txq.tunnel_en = 1; - tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init); - if (tmpl.qp == NULL) { - ret = (errno ? errno : EINVAL); - ERROR("%p: QP creation failure: %s", - (void *)dev, strerror(ret)); - goto error; - } - DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u," - " max_inline_data=%u", - attr.init.cap.max_send_wr, - attr.init.cap.max_send_sge, - attr.init.cap.max_inline_data); - attr.mod = (struct ibv_qp_attr){ - /* Move the QP to this state. */ - .qp_state = IBV_QPS_INIT, - /* Primary port number. */ - .port_num = priv->port - }; - ret = ibv_modify_qp(tmpl.qp, &attr.mod, - (IBV_QP_STATE | IBV_QP_PORT)); - if (ret) { - ERROR("%p: QP state to IBV_QPS_INIT failed: %s", - (void *)dev, strerror(ret)); - goto error; - } - ret = txq_setup(&tmpl, txq_ctrl); - if (ret) { - ERROR("%p: cannot initialize TX queue structure: %s", - (void *)dev, strerror(ret)); - goto error; - } - txq_alloc_elts(&tmpl, desc); - attr.mod = (struct ibv_qp_attr){ - .qp_state = IBV_QPS_RTR - }; - ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); - if (ret) { - ERROR("%p: QP state to IBV_QPS_RTR failed: %s", - (void *)dev, strerror(ret)); - goto error; - } - attr.mod.qp_state = IBV_QPS_RTS; - ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); - if (ret) { - ERROR("%p: QP state to IBV_QPS_RTS failed: %s", - (void *)dev, strerror(ret)); - goto error; - } - /* Clean up txq in case we're reinitializing it. */ - DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl); - mlx5_txq_cleanup(txq_ctrl); - *txq_ctrl = tmpl; - DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl); - /* Pre-register known mempools. */ - rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl); - assert(ret == 0); - return 0; -error: - mlx5_txq_cleanup(&tmpl); - assert(ret > 0); - return ret; + tmpl->txq.tunnel_en = 1; + tmpl->txq.elts = + (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1); + tmpl->txq.stats.idx = idx; + rte_atomic32_inc(&tmpl->refcnt); + DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); + return tmpl; } /** - * DPDK callback to configure a TX queue. + * Get a Tx queue. * - * @param dev - * Pointer to Ethernet device structure. + * @param priv + * Pointer to private structure. * @param idx * TX queue index. 
- * @param desc - * Number of descriptors to configure in queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param[in] conf - * Thresholds parameters. * * @return - * 0 on success, negative errno value on failure. + * A pointer to the queue if it exists. */ -int -mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, - unsigned int socket, const struct rte_eth_txconf *conf) +struct mlx5_txq_ctrl* +mlx5_priv_txq_get(struct priv *priv, uint16_t idx) { - struct priv *priv = dev->data->dev_private; - struct mlx5_txq_data *txq = (*priv->txqs)[idx]; - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); - int ret; + struct mlx5_txq_ctrl *ctrl = NULL; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; + if ((*priv->txqs)[idx]) { + ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, + txq); + unsigned int i; - priv_lock(priv); - if (desc <= MLX5_TX_COMP_THRESH) { - WARN("%p: number of descriptors requested for TX queue %u" - " must be higher than MLX5_TX_COMP_THRESH, using" - " %u instead of %u", - (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); - desc = MLX5_TX_COMP_THRESH + 1; - } - if (!rte_is_power_of_2(desc)) { - desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in TX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); - } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); - if (idx >= priv->txqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->txqs_n); - priv_unlock(priv); - return -EOVERFLOW; - } - if (txq != NULL) { - DEBUG("%p: reusing already allocated queue index %u (%p)", - (void *)dev, idx, (void *)txq); - if (dev->data->dev_started) { - priv_unlock(priv); - return -EEXIST; - } - (*priv->txqs)[idx] = NULL; - mlx5_txq_cleanup(txq_ctrl); - /* Resize if txq size is changed. */ - if (txq_ctrl->txq.elts_n != log2above(desc)) { - txq_ctrl = rte_realloc(txq_ctrl, - sizeof(*txq_ctrl) + - desc * sizeof(struct rte_mbuf *), - RTE_CACHE_LINE_SIZE); - if (!txq_ctrl) { - ERROR("%p: unable to reallocate queue index %u", - (void *)dev, idx); - priv_unlock(priv); - return -ENOMEM; + mlx5_priv_txq_ibv_get(priv, idx); + for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { + struct mlx5_mr *mr = NULL; + + (void)mr; + if (ctrl->txq.mp2mr[i]) { + mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp); + assert(mr); } } - } else { - txq_ctrl = - rte_calloc_socket("TXQ", 1, - sizeof(*txq_ctrl) + - desc * sizeof(struct rte_mbuf *), - 0, socket); - if (txq_ctrl == NULL) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - priv_unlock(priv); - return -ENOMEM; - } - } - ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf); - if (ret) - rte_free(txq_ctrl); - else { - txq_ctrl->txq.stats.idx = idx; - DEBUG("%p: adding TX queue %p to list", - (void *)dev, (void *)txq_ctrl); - (*priv->txqs)[idx] = &txq_ctrl->txq; + rte_atomic32_inc(&ctrl->refcnt); + DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + (void *)ctrl, rte_atomic32_read(&ctrl->refcnt)); } - priv_unlock(priv); - return -ret; + return ctrl; } /** - * DPDK callback to release a TX queue. + * Release a Tx queue. * - * @param dpdk_txq - * Generic TX queue pointer. + * @param priv + * Pointer to private structure. + * @param idx + * TX queue index. + * + * @return + * 0 on success, errno on failure. 
*/ -void -mlx5_tx_queue_release(void *dpdk_txq) +int +mlx5_priv_txq_release(struct priv *priv, uint16_t idx) { - struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq; - struct mlx5_txq_ctrl *txq_ctrl; - struct priv *priv; unsigned int i; + struct mlx5_txq_ctrl *txq; - if (mlx5_is_secondary()) - return; + if (!(*priv->txqs)[idx]) + return 0; + txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + (void *)txq, rte_atomic32_read(&txq->refcnt)); + if (txq->ibv) { + int ret; - if (txq == NULL) - return; - txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - priv = txq_ctrl->priv; - priv_lock(priv); - for (i = 0; (i != priv->txqs_n); ++i) - if ((*priv->txqs)[i] == txq) { - DEBUG("%p: removing TX queue %p from list", - (void *)priv->dev, (void *)txq_ctrl); - (*priv->txqs)[i] = NULL; - break; + ret = mlx5_priv_txq_ibv_release(priv, txq->ibv); + if (!ret) + txq->ibv = NULL; + } + for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { + if (txq->txq.mp2mr[i]) { + priv_mr_release(priv, txq->txq.mp2mr[i]); + txq->txq.mp2mr[i] = NULL; } - mlx5_txq_cleanup(txq_ctrl); - rte_free(txq_ctrl); - priv_unlock(priv); + } + if (rte_atomic32_dec_and_test(&txq->refcnt)) { + txq_free_elts(txq); + LIST_REMOVE(txq, next); + rte_free(txq); + (*priv->txqs)[idx] = NULL; + return 0; + } + return EBUSY; } - /** - * Map locally UAR used in Tx queues for BlueFlame doorbell. + * Verify if the queue can be released. * - * @param[in] priv + * @param priv * Pointer to private structure. - * @param fd - * Verbs file descriptor to map UAR pages. + * @param idx + * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 if the queue can be released. */ int -priv_tx_uar_remap(struct priv *priv, int fd) +mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) { - unsigned int i, j; - uintptr_t pages[priv->txqs_n]; - unsigned int pages_n = 0; - uintptr_t uar_va; - void *addr; - struct mlx5_txq_data *txq; - struct mlx5_txq_ctrl *txq_ctrl; - int already_mapped; - size_t page_size = sysconf(_SC_PAGESIZE); + struct mlx5_txq_ctrl *txq; - /* - * As rdma-core, UARs are mapped in size of OS page size. - * Use aligned address to avoid duplicate mmap. - * Ref to libmlx5 function: mlx5_init_context() - */ - for (i = 0; i != priv->txqs_n; ++i) { - txq = (*priv->txqs)[i]; - txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - uar_va = (uintptr_t)txq_ctrl->txq.bf_reg; - uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); - already_mapped = 0; - for (j = 0; j != pages_n; ++j) { - if (pages[j] == uar_va) { - already_mapped = 1; - break; - } - } - if (already_mapped) - continue; - pages[pages_n++] = uar_va; - addr = mmap((void *)uar_va, page_size, - PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, - txq_ctrl->uar_mmap_offset); - if (addr != (void *)uar_va) { - ERROR("call to mmap failed on UAR for txq %d\n", i); - return -1; - } + if (!(*priv->txqs)[idx]) + return -1; + txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + return (rte_atomic32_read(&txq->refcnt) == 1); +} + +/** + * Verify the Tx Queue list is empty + * + * @param priv + * Pointer to private structure. + * + * @return the number of object not released. + */ +int +mlx5_priv_txq_verify(struct priv *priv) +{ + struct mlx5_txq_ctrl *txq; + int ret = 0; + + LIST_FOREACH(txq, &priv->txqsctrl, next) { + DEBUG("%p: Tx Queue %p still referenced", (void *)priv, + (void *)txq); + ++ret; } - return 0; + return ret; }
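
For reference, the mlx5_priv_txq_new()/_get()/_release() helpers added by this patch (and their mlx5_priv_txq_ibv_*() counterparts) follow a reference-counting contract: every successful new/get takes a reference, release drops one and only frees the queue once the count reaches zero (returning EBUSY until then), while the *_releasable() and *_verify() helpers let callers check for remaining references. A minimal usage sketch, assuming the prototypes are exported through mlx5_rxtx.h as elsewhere in this driver; the wrapper function itself is purely illustrative:

#include <errno.h>
#include <stdint.h>

#include "mlx5.h"       /* struct priv (assumed location) */
#include "mlx5_rxtx.h"  /* mlx5_priv_txq_*() prototypes (assumed location) */

/* Hypothetical caller showing the intended get/release pairing. */
static int
example_use_txq(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *ctrl;

	/* Takes a reference on the control structure and re-acquires its
	 * Verbs object and cached MRs when the queue exists. */
	ctrl = mlx5_priv_txq_get(priv, idx);
	if (ctrl == NULL)
		return ENOENT;
	/* ... use ctrl->txq ... */
	/* Drop the reference; the queue is freed only when the last
	 * holder releases it, EBUSY is returned until then. */
	return mlx5_priv_txq_release(priv, idx);
}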
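
Similarly, the reworked mlx5_tx_queue_setup() earlier in the diff normalizes the requested descriptor count before creating the queue: it must be strictly greater than MLX5_TX_COMP_THRESH and is rounded up to the next power of two. A short sketch of that normalization, assuming MLX5_TX_COMP_THRESH and log2above() come from mlx5_defs.h and mlx5_utils.h as in the rest of the driver; the helper name is illustrative only:

#include <stdint.h>

#include <rte_common.h>   /* rte_is_power_of_2() */

#include "mlx5_defs.h"    /* MLX5_TX_COMP_THRESH (assumed location) */
#include "mlx5_utils.h"   /* log2above() (assumed location) */

/* Hypothetical helper mirroring the descriptor checks in
 * mlx5_tx_queue_setup(). */
static uint16_t
example_tx_desc_normalize(uint16_t desc)
{
	if (desc <= MLX5_TX_COMP_THRESH)
		desc = MLX5_TX_COMP_THRESH + 1;	/* force desc above the threshold */
	if (!rte_is_power_of_2(desc))
		desc = 1 << log2above(desc);	/* round up, e.g. 33 -> 64 */
	return desc;
}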