/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h>

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
        (DEV_TX_OFFLOAD_IPV4_CKSUM | \
         DEV_TX_OFFLOAD_UDP_CKSUM | \
         DEV_TX_OFFLOAD_TCP_CKSUM | \
         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
        (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
         DEV_TX_OFFLOAD_MULTI_SEGS)

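/*
 * Usage sketch: assuming a hypothetical txq_offloads mask holding the
 * DEV_TX_OFFLOAD_* flags requested for a queue, a setup path would pick
 * the vectorized Tx only when every requested offload is covered:
 *
 *	if (txq_offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
 *		use_scalar_tx_burst = 1; // hypothetical fallback switch
 */
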
/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
        static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
        static_assert(s, "A field of struct mlx5_cqe is changed")

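/*
 * The vectorized Rx path loads and stores several adjacent mbuf and CQE
 * fields with single vector operations, so it hard-codes the layouts of
 * struct rte_mbuf and struct mlx5_cqe. The assertions below turn any
 * layout change into a build error instead of silent data corruption.
 */
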
/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

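/*
 * These pin pkt_len, data_len and hash to fixed offsets inside the
 * 16-byte rx_descriptor_fields1 area, so a decompressed CQE can be
 * turned into those mbuf fields with a single 16-byte vector store.
 */
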
/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
                  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
                  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

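/*
 * ol_flags must sit 8 bytes past rearm_data and rearm_data must be
 * 16-byte aligned, so rearming an mbuf and setting its ol_flags is a
 * single aligned 16-byte vector store per mbuf.
 */
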
/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
                  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
                  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
                  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
                  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
                  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
                  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
                  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
                  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);

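/*
 * Together these pin the CQE fields consumed by the vectorized Rx
 * (rx_hash_res, hdr_type_etc, vlan_info, byte_cnt, sop_drop_qpn and
 * op_own) to the relative positions the vector shuffle masks in the
 * SSE/NEON implementations are written against.
 */
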
/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
        const uint16_t q_n = 1 << rxq->elts_n;
        const uint16_t q_mask = q_n - 1;
        uint16_t elts_idx = rxq->rq_ci & q_mask;
        struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
        volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
        unsigned int i;

        assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
        assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
        assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
        /* Do not cross the queue end. */
        n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
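        /*
         * Worked example of the clipping above: assuming
         * MLX5_VPMD_DESCS_PER_LOOP is 4, q_n = 256, elts_idx = 250 and
         * n = 64, n becomes RTE_MIN(64 - 4, 256 - 250) = 6, so the bulk
         * stops at the ring end. Holding back a few descriptors also keeps
         * the ring from filling completely, leaving room for the fake_mbuf
         * padding written below.
         */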
        if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
                rxq->stats.rx_nombuf += n;
                return;
        }
        for (i = 0; i < n; ++i) {
                wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
                                              RTE_PKTMBUF_HEADROOM);
                /* If there's only one MR, no need to replace LKey in WQE. */
                if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
                        wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
        }
        rxq->rq_ci += n;
        /* Prevent overflowing into consumed mbufs. */
        elts_idx = rxq->rq_ci & q_mask;
        for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
                (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
        /* Make WQE updates visible to HW before ringing the doorbell. */
        rte_io_wmb();
        *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

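/*
 * Caller sketch consistent with the assertions above (illustrative, not a
 * verbatim excerpt of the Rx burst): replenish once enough slots have been
 * consumed.
 *
 *	uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 */
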
#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */