From 301271bc1609196cf1b7f564f5f4ba8fd9e97ea5 Mon Sep 17 00:00:00 2001 From: =?utf8?q?N=C3=A9lio=20Laranjeiro?= Date: Wed, 23 Aug 2017 10:15:06 +0200 Subject: [PATCH] net/mlx5: prepare vector Rx ring at setup time To use the vector, four extra mbufs need to be added to the PMD Rx mbuf ring to avoid memory corruption. These additional mbufs are added on dev_start() whereas all other mbufs are allocated on queue setup. This patch brings this allocation back to the same place as the other mbuf allocations. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5_ethdev.c | 1 - drivers/net/mlx5/mlx5_rxq.c | 43 ++++++++++++++++++++++------ drivers/net/mlx5/mlx5_rxtx.c | 6 ---- drivers/net/mlx5/mlx5_rxtx.h | 1 - drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 38 ------------------------ 5 files changed, 34 insertions(+), 55 deletions(-) diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index b0eb3cdfcd..014edc78de 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -1540,7 +1540,6 @@ void priv_select_rx_function(struct priv *priv) { if (priv_check_vec_rx_support(priv) > 0) { - priv_prep_vec_rx_function(priv); priv->dev->rx_pkt_burst = mlx5_rx_burst_vec; DEBUG("selected RX vectorized function"); } else { diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 550e648be9..de54175a1c 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -711,6 +711,27 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n) }; (*rxq_ctrl->rxq.elts)[i] = buf; } + if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + struct rxq *rxq = &rxq_ctrl->rxq; + struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; + + assert(rxq->elts_n == rxq->cqe_n); + /* Initialize default rearm_data for vPMD. 
*/ + mbuf_init->data_off = RTE_PKTMBUF_HEADROOM; + rte_mbuf_refcnt_set(mbuf_init, 1); + mbuf_init->nb_segs = 1; + mbuf_init->port = rxq->port_id; + /* + * prevent compiler reordering: + * rearm_data covers previous fields. + */ + rte_compiler_barrier(); + rxq->mbuf_initializer = *(uint64_t *)&mbuf_init->rearm_data; + /* Padding with a fake mbuf for vectorized Rx. */ + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + (*rxq->elts)[elts_n + i] = &rxq->fake_mbuf; + rxq->trim_elts = 1; + } DEBUG("%p: allocated and configured %u segments (max %u packets)", (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); assert(ret == 0); @@ -791,9 +812,11 @@ rxq_setup(struct rxq_ctrl *tmpl) struct ibv_cq *ibcq = tmpl->cq; struct ibv_mlx5_cq_info cq_info; struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq); - struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] = + const uint16_t desc_n = + (1 << tmpl->rxq.elts_n) + tmpl->priv->rx_vec_en * + MLX5_VPMD_DESCS_PER_LOOP; + struct rte_mbuf *(*elts)[desc_n] = rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket); - if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) { ERROR("Unable to query CQ info. check your OFED."); return ENOTSUP; @@ -863,7 +886,9 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, } attr; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); unsigned int cqe_n = desc - 1; - struct rte_mbuf *(*elts)[desc] = NULL; + const uint16_t desc_n = + desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + struct rte_mbuf *(*elts)[desc_n] = NULL; int ret = 0; (void)conf; /* Thresholds configuration (ignored). */ @@ -1114,7 +1139,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct priv *priv = dev->data->dev_private; struct rxq *rxq = (*priv->rxqs)[idx]; struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); - const uint16_t desc_pad = MLX5_VPMD_DESCS_PER_LOOP; /* For vPMD. 
*/ + const uint16_t desc_n = + desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; int ret; if (mlx5_is_secondary()) @@ -1147,9 +1173,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, /* Resize if rxq size is changed. */ if (rxq_ctrl->rxq.elts_n != log2above(desc)) { rxq_ctrl = rte_realloc(rxq_ctrl, - sizeof(*rxq_ctrl) + - (desc + desc_pad) * - sizeof(struct rte_mbuf *), + sizeof(*rxq_ctrl) + desc_n * + sizeof(struct rte_mbuf *), RTE_CACHE_LINE_SIZE); if (!rxq_ctrl) { ERROR("%p: unable to reallocate queue index %u", @@ -1160,8 +1185,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } } else { rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) + - (desc + desc_pad) * - sizeof(struct rte_mbuf *), + desc_n * + sizeof(struct rte_mbuf *), 0, socket); if (rxq_ctrl == NULL) { ERROR("%p: unable to allocate queue index %u", diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index b07bcd118c..e9c4502a31 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -2028,9 +2028,3 @@ priv_check_vec_rx_support(struct priv *priv) (void)priv; return -ENOTSUP; } - -void __attribute__((weak)) -priv_prep_vec_rx_function(struct priv *priv) -{ - (void)priv; -} diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 7de1d10863..d85ea1626f 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -348,7 +348,6 @@ int priv_check_raw_vec_tx_support(struct priv *); int priv_check_vec_tx_support(struct priv *); int rxq_check_vec_support(struct rxq *); int priv_check_vec_rx_support(struct priv *); -void priv_prep_vec_rx_function(struct priv *); uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t); diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c index 
8560f745ac..67f63c692f 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c @@ -1377,41 +1377,3 @@ priv_check_vec_rx_support(struct priv *priv) return -ENOTSUP; return 1; } - -/** - * Prepare for vectorized RX. - * - * @param priv - * Pointer to private structure. - */ -void -priv_prep_vec_rx_function(struct priv *priv) -{ - uint16_t i; - - for (i = 0; i < priv->rxqs_n; ++i) { - struct rxq *rxq = (*priv->rxqs)[i]; - struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; - const uint16_t desc = 1 << rxq->elts_n; - int j; - - assert(rxq->elts_n == rxq->cqe_n); - /* Initialize default rearm_data for vPMD. */ - mbuf_init->data_off = RTE_PKTMBUF_HEADROOM; - rte_mbuf_refcnt_set(mbuf_init, 1); - mbuf_init->nb_segs = 1; - mbuf_init->port = rxq->port_id; - /* - * prevent compiler reordering: - * rearm_data covers previous fields. - */ - rte_compiler_barrier(); - rxq->mbuf_initializer = - *(uint64_t *)&mbuf_init->rearm_data; - /* Padding with a fake mbuf for vectorized Rx. */ - for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) - (*rxq->elts)[desc + j] = &rxq->fake_mbuf; - /* Mark that it need to be cleaned up for rxq_alloc_elts(). */ - rxq->trim_elts = 1; - } -} -- 2.20.1