X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=57bc1164501667dc87b6360e08510808f2221ae8;hb=1b7b9f170fcebbbd0708fab554dcb5a7badef8cf;hp=dfc379c0765a63e567f4c56d88fd7b927b859f17;hpb=ae18a1ae969262cc87f859497164a45285f13995;p=dpdk.git
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index dfc379c076..57bc116450 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
@@ -18,6 +17,7 @@
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -27,12 +27,15 @@
 #include <rte_ethdev_driver.h>
 #include <rte_common.h>
 
-#include "mlx5_utils.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_common.h>
+
 #include "mlx5_defs.h"
+#include "mlx5_utils.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_glue.h"
 /**
  * Allocate TX queue elements.
  *
@@ -61,7 +64,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
  * @param txq_ctrl
  *   Pointer to TX queue structure.
  */
-static void
+void
 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
@@ -79,9 +82,9 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	while (elts_tail != elts_head) {
 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
-		assert(elt != NULL);
+		MLX5_ASSERT(elt != NULL);
 		rte_pktmbuf_free_seg(elt);
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
 		/* Poisoning. */
 		memset(&(*elts)[elts_tail & elts_m],
 		       0x77,
@@ -126,12 +129,9 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
+				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	if (config->dv_flow_en)
-		offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
-#endif
 	return offloads;
 }
 
@@ -274,7 +274,6 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
 		dev->data->port_id, idx);
 	(*priv->txqs)[idx] = &txq_ctrl->txq;
-	txq_ctrl->type = MLX5_TXQ_TYPE_HAIRPIN;
 	return 0;
 }
 
@@ -305,6 +304,30 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	}
 }
 
+/**
+ * Configure the doorbell register non-cached attribute.
+ *
+ * @param txq_ctrl
+ *   Pointer to Tx queue control structure.
+ * @param page_size
+ *   System page size
+ */
+static void
+txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
+{
+	struct mlx5_priv *priv = txq_ctrl->priv;
+	off_t cmd;
+
+	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
+	txq_ctrl->txq.db_nc = 0;
+	/* Check the doorbell register mapping type. */
+	cmd = txq_ctrl->uar_mmap_offset / page_size;
+	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
+	cmd &= MLX5_UAR_MMAP_CMD_MASK;
+	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
+		txq_ctrl->txq.db_nc = 1;
+}
+
 /**
  * Initialize Tx UAR registers for primary process.
  *
@@ -316,16 +339,17 @@ txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	struct mlx5_priv *priv = txq_ctrl->priv;
 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
+	const size_t page_size = sysconf(_SC_PAGESIZE);
 #ifndef RTE_ARCH_64
 	unsigned int lock_idx;
-	const size_t page_size = sysconf(_SC_PAGESIZE);
 #endif
 
 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 		return;
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	assert(ppriv);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(ppriv);
 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
+	txq_uar_ncattr_init(txq_ctrl, page_size);
 #ifndef RTE_ARCH_64
 	/* Assign an UAR lock according to UAR page number */
 	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
@@ -361,7 +385,7 @@ txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
 
 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 		return 0;
-	assert(ppriv);
+	MLX5_ASSERT(ppriv);
 	/*
 	 * As rdma-core, UARs are mapped in size of OS page
 	 * size. Ref to libmlx5 function: mlx5_init_context()
@@ -379,6 +403,7 @@ txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
 	}
 	addr = RTE_PTR_ADD(addr, offset);
 	ppriv->uar_table[txq->idx] = addr;
+	txq_uar_ncattr_init(txq_ctrl, page_size);
 	return 0;
 }
 
@@ -421,7 +446,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
 	unsigned int i;
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	for (i = 0; i != priv->txqs_n; ++i) {
 		if (!(*priv->txqs)[i])
 			continue;
@@ -429,7 +454,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 			continue;
-		assert(txq->idx == (uint16_t)i);
+		MLX5_ASSERT(txq->idx == (uint16_t)i);
 		ret = txq_uar_init_secondary(txq_ctrl, fd);
 		if (ret)
 			goto error;
@@ -468,9 +493,10 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 	struct mlx5_devx_create_sq_attr attr = { 0 };
 	struct mlx5_txq_obj *tmpl = NULL;
 	int ret = 0;
+	uint32_t max_wq_data;
 
-	assert(txq_data);
-	assert(!txq_ctrl->obj);
+	MLX5_ASSERT(txq_data);
+	MLX5_ASSERT(!txq_ctrl->obj);
 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
 				 txq_ctrl->socket);
 	if (!tmpl) {
@@ -484,11 +510,15 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 	tmpl->txq_ctrl = txq_ctrl;
 	attr.hairpin = 1;
 	attr.tis_lst_sz = 1;
-	/* Workaround for hairpin startup */
-	attr.wq_attr.log_hairpin_num_packets = log2above(32);
-	/* Workaround for packets larger than 1KB */
+	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+	/* Jumbo frames > 9KB should be supported, and more packets. */
 	attr.wq_attr.log_hairpin_data_sz =
-			priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+			(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
+			 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
+	/* Set the packets number to the maximum value for performance. */
+	attr.wq_attr.log_hairpin_num_packets =
+			attr.wq_attr.log_hairpin_data_sz -
+			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.tis_num = priv->sh->tis->id;
 	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
 	if (!tmpl->sq) {
@@ -555,7 +585,7 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
 	if (priv->config.devx && !priv->sh->tdn)
 		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
 #endif
-	assert(txq_data);
+	MLX5_ASSERT(txq_data);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
 	priv->verbs_alloc_ctx.obj = txq_ctrl;
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -694,13 +724,22 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
 	txq_data->cq_db = cq_info.dbrec;
 	txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
 	txq_data->cq_ci = 0;
-#ifndef NDEBUG
 	txq_data->cq_pi = 0;
-#endif
 	txq_data->wqe_ci = 0;
 	txq_data->wqe_pi = 0;
 	txq_data->wqe_comp = 0;
 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
+	txq_data->fcqs = rte_calloc_socket(__func__,
+					   txq_data->cqe_s,
+					   sizeof(*txq_data->fcqs),
+					   RTE_CACHE_LINE_SIZE,
+					   txq_ctrl->socket);
+	if (!txq_data->fcqs) {
+		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		goto error;
+	}
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	/*
 	 * If using DevX need to query and store TIS transport domain value.
@@ -749,6 +788,8 @@ error:
 		claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
 	if (tmpl.qp)
 		claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+	if (txq_data && txq_data->fcqs)
+		rte_free(txq_data->fcqs);
 	if (txq_obj)
 		rte_free(txq_obj);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -795,7 +836,7 @@ mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-	assert(txq_obj);
+	MLX5_ASSERT(txq_obj);
 	if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
 		if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
 			if (txq_obj->tis)
@@ -803,6 +844,8 @@ mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 		} else {
 			claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
 			claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
+			if (txq_obj->txq_ctrl->txq.fcqs)
+				rte_free(txq_obj->txq_ctrl->txq.fcqs);
 		}
 		LIST_REMOVE(txq_obj, next);
 		rte_free(txq_obj);
@@ -934,13 +977,13 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		      (unsigned int)config->txq_inline_mpw;
 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
 		     0 : (unsigned int)config->txq_inline_min;
-	if (config->mps != MLX5_MPW_ENHANCED)
+	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
 		inlen_empw = 0;
 	/*
 	 * If there is requested minimal amount of data to inline
 	 * we MUST enable inlining. This is a case for ConnectX-4
 	 * which usually requires L2 inlined for correct operating
-	 * and ConnectX-4LX which requires L2-L4 inlined to
+	 * and ConnectX-4 Lx which requires L2-L4 inlined to
 	 * support E-Switch Flows.
 	 */
 	if (inlen_mode) {
@@ -1011,12 +1054,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		 * beginning of inlining buffer in Ethernet
 		 * Segment.
 		 */
-		assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
-		assert(inlen_send <= MLX5_WQE_SIZE_MAX +
-				     MLX5_ESEG_MIN_INLINE_SIZE -
-				     MLX5_WQE_CSEG_SIZE -
-				     MLX5_WQE_ESEG_SIZE -
-				     MLX5_WQE_DSEG_SIZE * 2);
+		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
+			    MLX5_ESEG_MIN_INLINE_SIZE -
+			    MLX5_WQE_CSEG_SIZE -
+			    MLX5_WQE_ESEG_SIZE -
+			    MLX5_WQE_DSEG_SIZE * 2);
 	} else if (inlen_mode) {
 		/*
 		 * If minimal inlining is requested we must
@@ -1066,12 +1109,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 				   PORT_ID(priv), inlen_empw, temp);
 			inlen_empw = temp;
 		}
-		assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
-		assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
-				     MLX5_DSEG_MIN_INLINE_SIZE -
-				     MLX5_WQE_CSEG_SIZE -
-				     MLX5_WQE_ESEG_SIZE -
-				     MLX5_WQE_DSEG_SIZE);
+		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
+			    MLX5_DSEG_MIN_INLINE_SIZE -
+			    MLX5_WQE_CSEG_SIZE -
+			    MLX5_WQE_ESEG_SIZE -
+			    MLX5_WQE_DSEG_SIZE);
 		txq_ctrl->txq.inlen_empw = inlen_empw;
 	}
 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
@@ -1186,10 +1229,11 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 	}
 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
 					    txq_ctrl->txq.inlen_empw);
-	assert(txq_ctrl->max_inline_data <= max_inline);
-	assert(txq_ctrl->txq.inlen_mode <= max_inline);
-	assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
-	assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw);
+	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+		    !txq_ctrl->txq.inlen_empw);
 	return 0;
 error:
 	rte_errno = ENOMEM;
@@ -1235,7 +1279,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* Save pointer of global generation number to check memory event. */
 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
-	assert(desc > MLX5_TX_COMP_THRESH);
+	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
 	tmpl->txq.offloads = conf->offloads |
 			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;