/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_PRM_H_
#define RTE_PMD_MLX5_PRM_H_

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx5_hw.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_vect.h>
#include "mlx5_autoconf.h"

/* Get CQE owner bit. */
#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)

/* Get CQE format. */
#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)

/* Get CQE opcode. */
#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)

/* Get CQE solicited event. */
#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)

/* Invalidate a CQE. */
#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
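
/*
 * Illustrative sketch, not part of the original definitions: a typical way
 * the op_own accessors above can be combined when polling a CQ.  The
 * function name and the "ci"/"cqe_n" parameters are hypothetical; "cqe_n" is
 * assumed to be the power-of-two number of entries in the CQ ring.
 */
static inline int
mlx5_cqe_sw_owned_example(uint8_t op_own, uint16_t ci, uint16_t cqe_n)
{
	/* The owner bit is expected to match the wrap parity of the index. */
	if (MLX5_CQE_OWNER(op_own) != !!(ci & cqe_n))
		return 0;
	/* MLX5_CQE_INVALID (from mlx5_hw.h) means no completion available. */
	if (MLX5_CQE_OPCODE(op_own) == MLX5_CQE_INVALID)
		return 0;
	return 1;
}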

/* Maximum number of packets a multi-packet WQE can handle. */
#define MLX5_MPW_DSEG_MAX 5

/* WQE DWORD size. */
#define MLX5_WQE_DWORD_SIZE 16

/* WQE size. */
#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)

/* Max size of a WQE session. */
#define MLX5_WQE_SIZE_MAX 960U

/* Compute the number of DS. */
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
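
/*
 * Informative example (not in the original file): MLX5_WQE_DS() rounds a
 * byte count up to a whole number of 16-byte data segments, e.g.
 * MLX5_WQE_DS(1) == 1, MLX5_WQE_DS(16) == 1, MLX5_WQE_DS(17) == 2 and
 * MLX5_WQE_DS(33) == 3.
 */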

/* Room for inline data in multi-packet WQE. */
#define MLX5_MWQE64_INL_DATA 28

/* Default minimum number of Tx queues for inlining packets. */
#define MLX5_EMPW_MIN_TXQS 8

/* Default max packet length to be inlined. */
#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)

/* Opcode and opcode modifier of the Enhanced Multi-Packet Send WQE. */
#define MLX5_OPC_MOD_ENHANCED_MPSW 0
#define MLX5_OPCODE_ENHANCED_MPSW 0x29

/* CQE value to inform that VLAN is stripped. */
#define MLX5_CQE_VLAN_STRIPPED (1u << 0)

/* IPv4 options. */
#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)

/* IPv6 packet. */
#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)

/* IPv4 packet. */
#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)

/* TCP packet. */
#define MLX5_CQE_RX_TCP_PACKET (1u << 4)

/* UDP packet. */
#define MLX5_CQE_RX_UDP_PACKET (1u << 5)

/* IP is fragmented. */
#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)

/* L2 header is valid. */
#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)

/* L3 header is valid. */
#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)

/* L4 header is valid. */
#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)

/* Outer packet, 0 IPv4, 1 IPv6. */
#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)

/* Tunnel packet bit in the CQE. */
#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
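
/*
 * Illustrative sketch, not part of the original definitions: the Rx flags
 * above are typically tested against the CQE "hdr_type_etc" word once it has
 * been converted to host byte order.  The helper name below is hypothetical
 * and only shows how the L3/L4 validity bits may be combined; it assumes
 * rte_be_to_cpu_16() is available (the flow mark helpers further down
 * already rely on <rte_byteorder.h> definitions).
 */
static inline int
mlx5_cqe_l3_l4_valid_example(uint16_t hdr_type_etc_be)
{
	uint16_t flags = rte_be_to_cpu_16(hdr_type_etc_be);
	uint16_t mask = MLX5_CQE_RX_L3_HDR_VALID | MLX5_CQE_RX_L4_HDR_VALID;

	/* Both the L3 and the L4 checksums must have been validated by HW. */
	return (flags & mask) == mask;
}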

/* Inner L3 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)

/* Inner L4 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)

/* Is flow mark valid. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
#else
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
#endif

/* INVALID is used by packets matching no flow rules. */
#define MLX5_FLOW_MARK_INVALID 0

/* Maximum allowed value to mark a packet. */
#define MLX5_FLOW_MARK_MAX 0xfffff0

/* Default mark value used when none is provided. */
#define MLX5_FLOW_MARK_DEFAULT 0xffffff

/* Maximum number of DS in WQE. */
#define MLX5_DSEG_MAX 63

/* Subset of struct mlx5_wqe_eth_seg. */
struct mlx5_wqe_eth_seg_small {
	uint32_t rsvd0;
	uint8_t cs_flags;
	uint8_t rsvd1;
	uint16_t mss;
	uint32_t rsvd2;
	uint16_t inline_hdr_sz;
	uint8_t inline_hdr[2];
} __rte_aligned(MLX5_WQE_DWORD_SIZE);

struct mlx5_wqe_inl_small {
	uint32_t byte_cnt;
	uint8_t raw;
} __rte_aligned(MLX5_WQE_DWORD_SIZE);

struct mlx5_wqe_ctrl {
	uint32_t ctrl0;
	uint32_t ctrl1;
	uint32_t ctrl2;
	uint32_t ctrl3;
} __rte_aligned(MLX5_WQE_DWORD_SIZE);

/* Small common part of the WQE. */
struct mlx5_wqe {
	uint32_t ctrl[4];
	struct mlx5_wqe_eth_seg_small eseg;
};

/* Vectorize WQE header. */
struct mlx5_wqe_v {
	rte_v128u32_t ctrl;
	rte_v128u32_t eseg;
};

/* WQE. */
struct mlx5_wqe64 {
	struct mlx5_wqe hdr;
	uint8_t raw[32];
} __rte_aligned(MLX5_WQE_SIZE);

/* MPW mode. */
enum mlx5_mpw_mode {
	MLX5_MPW_DISABLED,
	MLX5_MPW,
	MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
};

/* MPW session status. */
enum mlx5_mpw_state {
	MLX5_MPW_STATE_OPENED,
	MLX5_MPW_INL_STATE_OPENED,
	MLX5_MPW_ENHANCED_STATE_OPENED,
	MLX5_MPW_STATE_CLOSED,
};

/* MPW session descriptor. */
struct mlx5_mpw {
	enum mlx5_mpw_state state;
	unsigned int pkts_n;
	unsigned int len;
	unsigned int total_len;
	volatile struct mlx5_wqe *wqe;
	union {
		volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
		volatile uint8_t *raw;
	} data;
};

/* CQ element structure - should be equal to the cache line size */
struct mlx5_cqe {
#if (RTE_CACHE_LINE_SIZE == 128)
	uint8_t padding[64];
#endif
	uint8_t pkt_info;
	uint8_t rsvd0[11];
	uint32_t rx_hash_res;
	uint8_t rx_hash_type;
	uint8_t rsvd1[11];
	uint16_t hdr_type_etc;
	uint16_t vlan_info;
	uint8_t rsvd2[12];
	uint32_t byte_cnt;
	uint64_t timestamp;
	uint32_t sop_drop_qpn;
	uint16_t wqe_counter;
	uint8_t rsvd4;
	uint8_t op_own;
};

/**
 * Convert a user mark to flow mark.
 *
 * @param val
 *   Mark value to convert.
 *
 * @return
 *   Converted mark value.
 */
static inline uint32_t
mlx5_flow_mark_set(uint32_t val)
{
	uint32_t ret;

	/*
	 * Add one to the user value to differentiate un-marked flows from
	 * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
	 * remains untouched.
	 */
	if (val != MLX5_FLOW_MARK_DEFAULT)
		++val;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/*
	 * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
	 * word, byte-swapped by the kernel on little-endian systems. In this
	 * case, left-shifting the resulting big-endian value ensures the
	 * least significant 24 bits are retained when converting it back.
	 */
	ret = rte_cpu_to_be_32(val) >> 8;
#else
	ret = val;
#endif
	return ret;
}

/**
 * Convert a mark to user mark.
 *
 * @param val
 *   Mark value to convert.
 *
 * @return
 *   Converted mark value.
 */
static inline uint32_t
mlx5_flow_mark_get(uint32_t val)
{
	/*
	 * Subtract one from the retrieved value. It was added by
	 * mlx5_flow_mark_set() to distinguish unmarked flows.
	 */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	return (val >> 8) - 1;
#else
	return val - 1;
#endif
}
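
/*
 * Informative example, not from the original file (little-endian host
 * assumed): mlx5_flow_mark_set(42) increments the mark to 43 (0x2b) and
 * returns rte_cpu_to_be_32(0x2b) >> 8 == 0x002b0000, the value handed on
 * towards the device.  In the opposite direction, a CQE word that reads
 * 0x00002bff after a raw 32-bit load (an illustrative value only) yields
 * mlx5_flow_mark_get(0x00002bff) == (0x00002bff >> 8) - 1 == 42, i.e. the
 * original user mark.
 */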

#endif /* RTE_PMD_MLX5_PRM_H_ */