X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx_vec.c;h=0e2eab068a7185b9c24027b83dcf4d43ace86ad0;hb=fe3620aabacd504a2b6c807bd0abe18cc1a2da12;hp=d5af2d91ffb61e397fdf9576d99821d8f75dc520;hpb=151cbe3aabf9e1a71e02827e5e1ff0814275019a;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index d5af2d91ff..0e2eab068a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -106,22 +106,27 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 		rxq->stats.rx_nombuf += n;
 		return;
 	}
-	for (i = 0; i < n; ++i) {
-		void *buf_addr;
-
-		/*
-		 * In order to support the mbufs with external attached
-		 * data buffer we should use the buf_addr pointer
-		 * instead of rte_mbuf_buf_addr(). It touches the mbuf
-		 * itself and may impact the performance.
-		 */
-		buf_addr = elts[i]->buf_addr;
-		wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
-					      RTE_PKTMBUF_HEADROOM);
-		/* If there's a single MR, no need to replace LKey. */
-		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh)
-			     > 1))
+	if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
+		for (i = 0; i < n; ++i) {
+			/*
+			 * In order to support the mbufs with external attached
+			 * data buffer we should use the buf_addr pointer
+			 * instead of rte_mbuf_buf_addr(). It touches the mbuf
+			 * itself and may impact the performance.
+			 */
+			void *buf_addr = elts[i]->buf_addr;
+
+			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
+						      RTE_PKTMBUF_HEADROOM);
 			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+		}
+	} else {
+		for (i = 0; i < n; ++i) {
+			void *buf_addr = elts[i]->buf_addr;
+
+			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
+						      RTE_PKTMBUF_HEADROOM);
+		}
 	}
 	rxq->rq_ci += n;
 	/* Prevent overflowing into consumed mbufs. */
@@ -143,7 +148,7 @@ static inline void
 mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 {
 	const uint16_t wqe_n = 1 << rxq->elts_n;
-	const uint32_t strd_n = 1 << rxq->strd_num_n;
+	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
 	const uint32_t elts_n = wqe_n * strd_n;
 	const uint32_t wqe_mask = elts_n - 1;
 	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
@@ -152,7 +157,8 @@ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 	unsigned int i;
 
 	if (n >= rxq->rq_repl_thresh &&
-	    rxq->elts_ci - rxq->rq_pi <= rxq->rq_repl_thresh) {
+	    rxq->elts_ci - rxq->rq_pi <=
+	    rxq->rq_repl_thresh + MLX5_VPMD_RX_MAX_BURST) {
 		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n));
 		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n) >
 			    MLX5_VPMD_DESCS_PER_LOOP);
@@ -191,8 +197,8 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
 {
 	const uint16_t wqe_n = 1 << rxq->elts_n;
 	const uint16_t wqe_mask = wqe_n - 1;
-	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
-	const uint32_t strd_n = 1 << rxq->strd_num_n;
+	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
+	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
 	const uint32_t elts_n = wqe_n * strd_n;
 	const uint32_t elts_mask = elts_n - 1;
 	uint32_t elts_idx = rxq->rq_pi & elts_mask;
@@ -422,7 +428,7 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
 	const uint16_t wqe_n = 1 << rxq->elts_n;
-	const uint32_t strd_n = 1 << rxq->strd_num_n;
+	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
 	const uint32_t elts_n = wqe_n * strd_n;
 	const uint32_t elts_mask = elts_n - 1;
 	volatile struct mlx5_cqe *cq;
@@ -442,6 +448,8 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rte_prefetch0(cq + 3);
 	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
 	mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
+	/* Not to move past the allocated mbufs. */
+	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
 	/* See if there're unreturned mbufs from compressed CQE. */
 	rcvd_pkt = rxq->decompressed;
 	if (rcvd_pkt > 0) {
@@ -457,8 +465,6 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	/* Not to cross queue end. */
 	pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
 	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
-	/* Not to move past the allocated mbufs. */
-	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
 	if (!pkts_n) {
 		*no_cq = !cp_pkt;
 		return cp_pkt;
@@ -544,7 +550,7 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 	struct mlx5_rxq_ctrl *ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
-	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
+	if (!RXQ_PORT(ctrl)->config.rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
 	if (rxq->lro)
 		return -ENOTSUP;
@@ -572,11 +578,11 @@ mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);
 
-		if (!rxq)
+		if (!rxq_data)
 			continue;
-		if (mlx5_rxq_check_vec_support(rxq) < 0)
+		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
 			break;
 	}
 	if (i != priv->rxqs_n)
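
Note on the first hunk: it hoists the multi-MR check (mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1) out of the per-mbuf replenish loop, so the common single-MR case only writes the buffer addresses and never calls mlx5_rx_mb2mr(). The following is a minimal, self-contained sketch of that loop-splitting pattern, not the driver code itself; the names (struct desc, lookup_lkey, replenish, multi_mr) are hypothetical stand-ins for the mlx5 structures and helpers.

/*
 * Simplified sketch of the loop split in mlx5_rx_replenish_bulk_mbuf():
 * evaluate an invariant, unlikely condition once per burst and emit two
 * specialized loops instead of branching per descriptor.
 * All names here are illustrative only, not the driver's API.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint64_t addr;
	uint32_t lkey;
};

/* Stand-in for mlx5_rx_mb2mr(): only needed when several MRs are registered. */
static uint32_t
lookup_lkey(uintptr_t buf)
{
	return (uint32_t)(buf >> 12);
}

static void
replenish(struct desc *wq, const uintptr_t *bufs, unsigned int n, bool multi_mr)
{
	unsigned int i;

	if (multi_mr) {
		/* Unlikely slow path: per-buffer LKey lookup. */
		for (i = 0; i < n; ++i) {
			wq[i].addr = bufs[i];
			wq[i].lkey = lookup_lkey(bufs[i]);
		}
	} else {
		/* Fast path: a single MR, so the LKey never changes. */
		for (i = 0; i < n; ++i)
			wq[i].addr = bufs[i];
	}
}

int
main(void)
{
	uintptr_t bufs[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct desc wq[4] = { { 0, 0 } };

	replenish(wq, bufs, 4, false);
	printf("addr[0]=0x%" PRIx64 " lkey[0]=%u\n", wq[0].addr, wq[0].lkey);
	return 0;
}

Splitting the loop trades a little code duplication for removing a conditional from the per-packet path, which is the usual motivation for this kind of change in a vectorized Rx routine.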
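Note on the strd_sz/strd_n hunks: they rename the stride fields to log_strd_sz/log_strd_num and compute the values with RTE_BIT32() instead of an open-coded shift; the arithmetic is unchanged, since RTE_BIT32(n) in DPDK's rte_bitops.h is a 32-bit "1 << n". A tiny sanity-check sketch follows, with the macro defined locally so it builds without DPDK headers and with made-up log2 values.

#include <assert.h>
#include <stdint.h>

/* Local stand-in for RTE_BIT32() from DPDK's <rte_bitops.h>. */
#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

int
main(void)
{
	uint8_t log_strd_num = 6;	/* hypothetical log2 of strides per WQE */
	uint8_t log_strd_sz = 11;	/* hypothetical log2 of stride size */

	/* Same values as the previous "1 << rxq->strd_num_n" style code. */
	assert(RTE_BIT32(log_strd_num) == (1u << log_strd_num));
	assert(RTE_BIT32(log_strd_sz) == (1u << log_strd_sz));
	return 0;
}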