/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <rte_common.h>
#include <rte_mbuf.h>

#include <mlx5_prm.h>

#include "mlx5_autoconf.h"

#include "mlx5_mr.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
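
/*
 * Usage sketch (illustrative): a Tx burst selector can mask the queue's
 * requested offloads against this capability set and fall back to the
 * scalar path on any mismatch, e.g.
 *
 *	if (offloads & ~MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
 *		return use_scalar_path;
 *
 * where "offloads" and the fallback value are placeholders for this sketch.
 */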

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
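
/*
 * The three checks above pin the layout of rx_descriptor_fields1 because
 * the CQE decompression path is expected to fill packet_type, pkt_len,
 * data_len and hash with fixed-offset vector stores rather than per-field
 * assignments.
 */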

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
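
/*
 * Here the intent is that rearm_data (8 bytes) and ol_flags (8 bytes)
 * form one 16-byte-aligned block, so the mbuf rearm template and the
 * computed offload flags can be written with a single vector store.
 */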

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
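
/*
 * These CQE offset checks guard the fixed byte positions assumed by the
 * shuffle masks in the SSE/NEON/AltiVec burst implementations that gather
 * fields out of the completion entries.
 */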

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t n = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

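	/*
	 * rq_ci counts buffers posted to the ring and rq_pi packets already
	 * pulled out of it, so rq_ci - rq_pi is what the hardware still
	 * holds and n the number of replenishable slots; the uint16_t
	 * subtraction stays correct across index wraparound.
	 */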
	if (n >= rxq->rq_repl_thresh) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
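		/*
		 * MLX5_VPMD_DESCS_PER_LOOP slots are deliberately held back
		 * so the sentinel entries written further below always have
		 * room; clipping at q_n - elts_idx keeps the bulk fill
		 * within the linear elts array.
		 */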
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
		for (i = 0; i < n; ++i) {
			void *buf_addr;

			/*
			 * Read the buf_addr field instead of computing the
			 * address with rte_mbuf_buf_addr(); this is required
			 * for mbufs with externally attached data buffers,
			 * although touching the mbuf itself may impact
			 * performance.
			 */
			buf_addr = elts[i]->buf_addr;
			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
						      RTE_PKTMBUF_HEADROOM);
			/* If there's a single MR, no need to replace LKey. */
			if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh)
				     > 1))
				wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
		}
		rxq->rq_ci += n;
		/* Prevent overflowing into consumed mbufs. */
		elts_idx = rxq->rq_ci & q_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
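		/*
		 * The fake_mbuf entries act as sentinels: the vectorized
		 * burst loop handles descriptors in groups of
		 * MLX5_VPMD_DESCS_PER_LOOP and may peek past rq_ci, so those
		 * slots must not point at mbufs still owned by the
		 * application.
		 */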
		/* Make the WQE writes visible before ringing the doorbell. */
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
}
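
/*
 * Usage sketch (illustrative): the vectorized Rx burst routines are
 * expected to call this helper at the top of each burst, e.g.
 *
 *	uint16_t
 *	rxq_burst_v(struct mlx5_rxq_data *rxq, ...)
 *	{
 *		mlx5_rx_replenish_bulk_mbuf(rxq);
 *		...
 *	}
 *
 * so refills happen only once rq_repl_thresh slots are free, amortizing
 * the mempool dequeue and doorbell cost over many packets.
 */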

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */