/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MATCH_METADATA | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)
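
/*
 * Illustrative sketch only (not part of the driver API): the mask above lets
 * a Tx setup path decide whether the offloads requested for a queue are
 * compatible with the vectorized datapath. The helper name below is
 * hypothetical; the real decision is made in the device's Tx support check.
 */
static inline int
mlx5_tx_offloads_fit_vec_sketch(uint64_t requested_offloads)
{
	/* Any requested offload outside the capability mask rules out vectorized Tx. */
	return (requested_offloads & ~MLX5_VEC_TX_OFFLOAD_CAP) == 0;
}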

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
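
/*
 * Illustrative sketch only (not used by the driver): the rte_mbuf asserts
 * above guarantee that pkt_len, data_len and hash sit at fixed offsets from
 * rx_descriptor_fields1, so those 16 contiguous bytes can be filled by the
 * vectorized Rx routines with a single 16-byte vector store. The hypothetical
 * helper below models that store with a plain byte copy.
 */
static inline void
mlx5_rx_desc_fields_store_sketch(struct rte_mbuf *pkt, const uint8_t desc[16])
{
	uint8_t *dst = (uint8_t *)&pkt->rx_descriptor_fields1;
	unsigned int i;

	/* One contiguous 16-byte write; the real code uses a vector store. */
	for (i = 0; i < 16; ++i)
		dst[i] = desc[i];
}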

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	for (i = 0; i < n; ++i) {
		void *buf_addr;

		/*
		 * Load the virtual address for Rx WQE. Non-x86 processors
		 * (mostly RISC such as ARM and Power) are more vulnerable to
		 * load stall. For x86, reducing the number of instructions
		 * seems to matter most.
		 */
#ifdef RTE_ARCH_X86_64
		buf_addr = elts[i]->buf_addr;
		assert(buf_addr == rte_mbuf_buf_addr(elts[i], rxq->mp));
#else
		buf_addr = rte_mbuf_buf_addr(elts[i], rxq->mp);
		assert(buf_addr == elts[i]->buf_addr);
#endif
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
					      RTE_PKTMBUF_HEADROOM);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
	}
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	/* Make sure WQE updates are visible before ringing the doorbell. */
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
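
/*
 * Illustrative sketch only (the real trigger lives in the per-architecture
 * rxq_burst_v() implementations): a vectorized Rx burst typically calls the
 * replenish helper once the number of free descriptors reaches the queue's
 * replenish threshold. The helper name below is hypothetical.
 */
static inline void
mlx5_rx_maybe_replenish_sketch(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	uint16_t n = q_n - (uint16_t)(rxq->rq_ci - rxq->rq_pi);

	if (n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
		mlx5_rx_replenish_bulk_mbuf(rxq, n);
}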

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */