diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index c41a9b96a0..7d7f016f16 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -40,6 +40,18 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_prm.h"
 
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
+	 DEV_TX_OFFLOAD_UDP_CKSUM | \
+	 DEV_TX_OFFLOAD_TCP_CKSUM | \
+	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+	 DEV_TX_OFFLOAD_MULTI_SEGS)
+
 /*
  * Compile time sanity check for vectorized functions.
  */
@@ -68,7 +80,11 @@ S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
 		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
 S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
 		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+#if (RTE_CACHE_LINE_SIZE == 128)
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
+#else
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
+#endif
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
 		  offsetof(struct mlx5_cqe, pkt_info) + 12);
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
@@ -84,4 +100,43 @@ S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
 		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
 
+/**
+ * Replenish buffers for RX in bulk.
+ *
+ * @param rxq
+ *   Pointer to RX queue structure.
+ * @param n
+ *   Number of buffers to be replenished.
+ */
+static inline void
+mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
+{
+	const uint16_t q_n = 1 << rxq->elts_n;
+	const uint16_t q_mask = q_n - 1;
+	uint16_t elts_idx = rxq->rq_ci & q_mask;
+	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+	volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+	unsigned int i;
+
+	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+	/* Not to cross queue end. */
+	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
+	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
+		rxq->stats.rx_nombuf += n;
+		return;
+	}
+	for (i = 0; i < n; ++i)
+		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+					      RTE_PKTMBUF_HEADROOM);
+	rxq->rq_ci += n;
+	/* Prevent overflowing into consumed mbufs. */
+	elts_idx = rxq->rq_ci & q_mask;
+	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
+	rte_io_wmb();
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+}
+
 #endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */
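
Note: the two capability masks added above are the kind of mask a burst-function
selector consults before picking the vectorized Tx path: if any offload requested
on the port falls outside MLX5_VEC_TX_OFFLOAD_CAP, the driver must fall back to
the scalar path. Below is a minimal standalone sketch of that check; the
DEV_TX_OFFLOAD_* values are stand-ins (the real definitions live in
rte_ethdev.h) and can_use_vec_tx() is a hypothetical helper, not a function
from this patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in offload flag values; the real ones come from rte_ethdev.h. */
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x0002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x0004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x0008
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x1000
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x8000

/* Masks mirrored from the patch above. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)

/* Hypothetical selector: the vectorized Tx path is viable only if no
 * requested offload falls outside the capability mask. */
static int
can_use_vec_tx(uint64_t requested_offloads)
{
	return (requested_offloads & ~(uint64_t)MLX5_VEC_TX_OFFLOAD_CAP) == 0;
}

int
main(void)
{
	uint64_t ok = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_MULTI_SEGS;
	uint64_t bad = ok | 0x10000; /* some offload bit outside the cap */

	printf("cksum+multiseg   -> vec Tx: %d\n", can_use_vec_tx(ok));  /* 1 */
	printf("plus unsupported -> vec Tx: %d\n", can_use_vec_tx(bad)); /* 0 */
	return 0;
}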
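
Note: the new #if branch in the sanity checks reflects platforms with a
128-byte cache line, where struct mlx5_cqe is laid out with 64 bytes of leading
padding so a CQE still fills a whole line; pkt_info therefore lands at offset
64 instead of 0. A self-contained sketch of the same compile-time layout-check
technique, using C11 _Static_assert on a toy struct (the field names and pad
size are illustrative, not the real mlx5_cqe):

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 128 /* illustrative; DPDK uses RTE_CACHE_LINE_SIZE */

struct toy_cqe {
#if (CACHE_LINE_SIZE == 128)
	uint8_t padding[64]; /* keep one CQE per 128-byte cache line */
#endif
	uint8_t pkt_info;
	uint8_t rsvd[63];
};

/* Compilation fails if the layout assumption is ever broken. */
#if (CACHE_LINE_SIZE == 128)
_Static_assert(offsetof(struct toy_cqe, pkt_info) == 64,
	       "pkt_info must start after the 64-byte pad");
#else
_Static_assert(offsetof(struct toy_cqe, pkt_info) == 0,
	       "pkt_info must be the first field");
#endif

int main(void) { return 0; }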
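
Note: in mlx5_rx_replenish_bulk_mbuf(), the RTE_MIN() clamp keeps a single
rte_mempool_get_bulk() call from wrapping past the ring end, and the
MLX5_VPMD_DESCS_PER_LOOP it subtracts leaves room for the fake_mbuf guard
entries written after the producer index advances. A self-contained sketch of
just that index arithmetic, with made-up ring sizes:

#include <stdint.h>
#include <stdio.h>

#define DESCS_PER_LOOP 4 /* stand-in for MLX5_VPMD_DESCS_PER_LOOP */

static uint16_t
min_u16(uint16_t a, uint16_t b)
{
	return a < b ? a : b;
}

/* How many descriptors one bulk replenish may fill, starting at producer
 * index ci on a power-of-two ring of q_n entries, without crossing the
 * ring end and while reserving DESCS_PER_LOOP trailing guard slots. */
static uint16_t
replenish_count(uint16_t ci, uint16_t q_n, uint16_t n)
{
	const uint16_t q_mask = q_n - 1;
	const uint16_t elts_idx = ci & q_mask;

	return min_u16(n - DESCS_PER_LOOP, q_n - elts_idx);
}

int
main(void)
{
	/* 256-entry ring, 64 buffers wanted. */
	printf("%u\n", replenish_count(230, 256, 64)); /* 26: stop at ring end */
	printf("%u\n", replenish_count(16, 256, 64));  /* 60: 64 minus guard */
	return 0;
}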