X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx_vec.h;h=93b4f517bb3e1aab612d0908cda70d652eee0e2b;hb=2451574a49d24b3f63f780f39bfaefd38aea3331;hp=fda7004e2d635e04441fa2addcdb568a8993b284;hpb=6bd7fbd03c6282689415ea3ff99622a631109984;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index fda7004e2d..93b4f517bb 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -9,8 +9,10 @@
 #include <rte_common.h>
 #include <rte_mbuf.h>
 
+#include <mlx5_prm.h>
+
 #include "mlx5_autoconf.h"
-#include "mlx5_prm.h"
+#include "mlx5_mr.h"
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
@@ -19,12 +21,6 @@
 	 DEV_TX_OFFLOAD_TCP_CKSUM | \
 	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
-/* HW offload capabilities of vectorized Tx. */
-#define MLX5_VEC_TX_OFFLOAD_CAP \
-	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
-	 DEV_TX_OFFLOAD_MATCH_METADATA | \
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
-
 /*
  * Compile time sanity check for vectorized functions.
  */
@@ -60,61 +56,15 @@ S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
 #endif
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
 		  offsetof(struct mlx5_cqe, pkt_info) + 12);
-S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
-		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
 		  offsetof(struct mlx5_cqe, hdr_type_etc));
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
 		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
-S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
-		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 ==
 		  offsetof(struct mlx5_cqe, byte_cnt));
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
 		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
 		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
 
-/**
- * Replenish buffers for RX in bulk.
- *
- * @param rxq
- *   Pointer to RX queue structure.
- * @param n
- *   Number of buffers to be replenished.
- */
-static inline void
-mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
-{
-	const uint16_t q_n = 1 << rxq->elts_n;
-	const uint16_t q_mask = q_n - 1;
-	uint16_t elts_idx = rxq->rq_ci & q_mask;
-	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
-	volatile struct mlx5_wqe_data_seg *wq =
-		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
-	unsigned int i;
-
-	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
-	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
-	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
-	/* Not to cross queue end. */
-	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
-	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
-		rxq->stats.rx_nombuf += n;
-		return;
-	}
-	for (i = 0; i < n; ++i) {
-		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
-					      RTE_PKTMBUF_HEADROOM);
-		/* If there's only one MR, no need to replace LKey in WQE. */
-		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
-			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
-	}
-	rxq->rq_ci += n;
-	/* Prevent overflowing into consumed mbufs. */
-	elts_idx = rxq->rq_ci & q_mask;
-	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
-		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
-	rte_cio_wmb();
-	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
-}
-
 #endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */