X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx_vec.c;h=e1b6d5422a42480421b0548e8893a3c2dd12dbbe;hb=f17e4b4ffef9ecf2547ad8ee628e94db8f561b2f;hp=469ea8401da86fc1b973dcd9112008d831973214;hpb=0f20acbf5edaeab8e4c9d400e443679d48008569;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 469ea8401d..e1b6d5422a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -19,6 +19,7 @@
 #include "mlx5.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
 #include "mlx5_rxtx_vec.h"
 #include "mlx5_autoconf.h"
 
@@ -105,22 +106,27 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 		rxq->stats.rx_nombuf += n;
 		return;
 	}
-	for (i = 0; i < n; ++i) {
-		void *buf_addr;
-
-		/*
-		 * In order to support the mbufs with external attached
-		 * data buffer we should use the buf_addr pointer
-		 * instead of rte_mbuf_buf_addr(). It touches the mbuf
-		 * itself and may impact the performance.
-		 */
-		buf_addr = elts[i]->buf_addr;
-		wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
-					      RTE_PKTMBUF_HEADROOM);
-		/* If there's a single MR, no need to replace LKey. */
-		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh)
-			     > 1))
+	if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
+		for (i = 0; i < n; ++i) {
+			/*
+			 * In order to support the mbufs with external attached
+			 * data buffer we should use the buf_addr pointer
+			 * instead of rte_mbuf_buf_addr(). It touches the mbuf
+			 * itself and may impact the performance.
+			 */
+			void *buf_addr = elts[i]->buf_addr;
+
+			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
+						      RTE_PKTMBUF_HEADROOM);
 			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+		}
+	} else {
+		for (i = 0; i < n; ++i) {
+			void *buf_addr = elts[i]->buf_addr;
+
+			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
+						      RTE_PKTMBUF_HEADROOM);
+		}
 	}
 	rxq->rq_ci += n;
 	/* Prevent overflowing into consumed mbufs. */
@@ -148,18 +154,27 @@ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
 	uint32_t elts_idx = rxq->elts_ci & wqe_mask;
 	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+	unsigned int i;
 
-	/* Not to cross queue end. */
-	if (n >= rxq->rq_repl_thresh) {
+	if (n >= rxq->rq_repl_thresh &&
+	    rxq->elts_ci - rxq->rq_pi <=
+	    rxq->rq_repl_thresh + MLX5_VPMD_RX_MAX_BURST) {
 		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n));
 		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n) >
 			    MLX5_VPMD_DESCS_PER_LOOP);
-		n = RTE_MIN(n, elts_n - elts_idx);
+		/* Not to cross queue end. */
+		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, elts_n - elts_idx);
+		/* Limit replenish number to threshold value. */
+		n = RTE_MIN(n, rxq->rq_repl_thresh);
 		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
 			rxq->stats.rx_nombuf += n;
 			return;
 		}
 		rxq->elts_ci += n;
+		/* Prevent overflowing into consumed mbufs. */
+		elts_idx = rxq->elts_ci & wqe_mask;
+		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
 	}
 }
 
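
Note on the change above (commentary, not part of the diff): the rework of mlx5_rx_replenish_bulk_mbuf() is a loop-invariant branch hoist. mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) is effectively constant across one replenish call, so instead of re-testing it per mbuf, the diff lifts the single-MR check out of the loop and duplicates the loop into a with-LKey and a without-LKey variant. A minimal compilable sketch of the same transformation follows; struct wqe, mb2mr() and multi_mr are hypothetical stand-ins invented for illustration, not the driver's real definitions.

#include <stdint.h>

/*
 * Illustrative sketch only: "wqe", "mb2mr" and "multi_mr" are placeholders
 * for the WQE data segment, the LKey lookup and the MR-cache-depth check.
 */
struct wqe {
	uint64_t addr;
	uint32_t lkey;
};

/* Before: the invariant branch is re-evaluated for every mbuf. */
void
replenish_naive(struct wqe *wq, void **bufs, unsigned int n,
		int multi_mr, uint32_t (*mb2mr)(void *))
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		wq[i].addr = (uint64_t)(uintptr_t)bufs[i];
		if (multi_mr)	/* same outcome on every iteration */
			wq[i].lkey = mb2mr(bufs[i]);
	}
}

/* After: hoist the branch; each specialized loop body is branch-free. */
void
replenish_hoisted(struct wqe *wq, void **bufs, unsigned int n,
		  int multi_mr, uint32_t (*mb2mr)(void *))
{
	unsigned int i;

	if (multi_mr) {
		for (i = 0; i < n; ++i) {
			wq[i].addr = (uint64_t)(uintptr_t)bufs[i];
			wq[i].lkey = mb2mr(bufs[i]);
		}
	} else {
		for (i = 0; i < n; ++i)
			wq[i].addr = (uint64_t)(uintptr_t)bufs[i];
	}
}

The mlx5_rx_mprq_replenish_bulk_mbuf() hunk, by its own comments, applies the guard the non-MPRQ path already had: the replenish count is clamped to rq_repl_thresh, MLX5_VPMD_DESCS_PER_LOOP slots are held back from the bulk, and that many &rxq->fake_mbuf sentinels are written past the new producer index, so a vectorized burst that always loads a full batch of descriptors never overflows into consumed mbufs.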