1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
8 #include <netinet/in.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
21 #pragma GCC diagnostic error "-Wpedantic"
24 #include <rte_atomic.h>
25 #include <rte_alarm.h>
32 /* Private rte flow items. */
33 enum mlx5_rte_flow_item_type {
34 MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
35 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
36 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
37 MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
40 /* Private (internal) rte flow actions. */
41 enum mlx5_rte_flow_action_type {
42 MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
43 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
44 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
45 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
48 /* Matches on selected register. */
49 struct mlx5_rte_flow_item_tag {
54 /* Modify selected register. */
55 struct mlx5_rte_flow_action_set_tag {
60 struct mlx5_flow_action_copy_mreg {
65 /* Matches on source queue. */
66 struct mlx5_rte_flow_item_tx_queue {
70 /* Feature name to allocate metadata register. */
71 enum mlx5_feature_name {
84 /* Pattern outer Layer bits. */
85 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
86 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
87 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
88 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
89 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
90 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
92 /* Pattern inner Layer bits. */
93 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
94 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
95 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
96 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
97 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
98 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
100 /* Pattern tunnel Layer bits. */
101 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
102 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
103 #define MLX5_FLOW_LAYER_GRE (1u << 14)
104 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
105 /* List of tunnel Layer bits continued below. */
107 /* General pattern items bits. */
108 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
109 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
110 #define MLX5_FLOW_ITEM_TAG (1u << 18)
111 #define MLX5_FLOW_ITEM_MARK (1u << 19)
113 /* Pattern MISC bits. */
114 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
115 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
116 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
118 /* Pattern tunnel Layer bits (continued). */
119 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
120 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
121 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
122 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
125 #define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)
127 /* Pattern tunnel Layer bits (continued). */
128 #define MLX5_FLOW_LAYER_GTP (1u << 28)
131 #define MLX5_FLOW_LAYER_OUTER_L3 \
132 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
133 #define MLX5_FLOW_LAYER_OUTER_L4 \
134 (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
135 #define MLX5_FLOW_LAYER_OUTER \
136 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
137 MLX5_FLOW_LAYER_OUTER_L4)
140 #define MLX5_FLOW_LAYER_TUNNEL \
141 (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
142 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
143 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
144 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
147 #define MLX5_FLOW_LAYER_INNER_L3 \
148 (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
149 #define MLX5_FLOW_LAYER_INNER_L4 \
150 (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
151 #define MLX5_FLOW_LAYER_INNER \
152 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
153 MLX5_FLOW_LAYER_INNER_L4)
156 #define MLX5_FLOW_LAYER_L2 \
157 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
158 #define MLX5_FLOW_LAYER_L3_IPV4 \
159 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
160 #define MLX5_FLOW_LAYER_L3_IPV6 \
161 (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
162 #define MLX5_FLOW_LAYER_L3 \
163 (MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
164 #define MLX5_FLOW_LAYER_L4 \
165 (MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
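/*
 * Usage sketch (illustrative only, not part of this header): the flow
 * parsers accumulate the MLX5_FLOW_LAYER_* and MLX5_FLOW_ITEM_* bits above
 * into a 64-bit "item_flags" bitmap while walking the pattern and then test
 * combinations of them, e.g.:
 *
 *	uint64_t item_flags = 0;
 *	int tunnel;
 *
 *	item_flags |= MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *	tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 *	item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
 *			       MLX5_FLOW_LAYER_OUTER_L4_UDP;
 */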
168 #define MLX5_FLOW_ACTION_DROP (1u << 0)
169 #define MLX5_FLOW_ACTION_QUEUE (1u << 1)
170 #define MLX5_FLOW_ACTION_RSS (1u << 2)
171 #define MLX5_FLOW_ACTION_FLAG (1u << 3)
172 #define MLX5_FLOW_ACTION_MARK (1u << 4)
173 #define MLX5_FLOW_ACTION_COUNT (1u << 5)
174 #define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
175 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
176 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
177 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
178 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
179 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
180 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
181 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
182 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
183 #define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
184 #define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
185 #define MLX5_FLOW_ACTION_JUMP (1u << 17)
186 #define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
187 #define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
188 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
189 #define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
190 #define MLX5_FLOW_ACTION_ENCAP (1u << 22)
191 #define MLX5_FLOW_ACTION_DECAP (1u << 23)
192 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 24)
193 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 25)
194 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 26)
195 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 27)
196 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
197 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
198 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
199 #define MLX5_FLOW_ACTION_METER (1ull << 31)
200 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
201 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
203 #define MLX5_FLOW_FATE_ACTIONS \
204 (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
205 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP)
207 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
208 (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
209 MLX5_FLOW_ACTION_JUMP)
212 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
213 MLX5_FLOW_ACTION_SET_IPV4_DST | \
214 MLX5_FLOW_ACTION_SET_IPV6_SRC | \
215 MLX5_FLOW_ACTION_SET_IPV6_DST | \
216 MLX5_FLOW_ACTION_SET_TP_SRC | \
217 MLX5_FLOW_ACTION_SET_TP_DST | \
218 MLX5_FLOW_ACTION_SET_TTL | \
219 MLX5_FLOW_ACTION_DEC_TTL | \
220 MLX5_FLOW_ACTION_SET_MAC_SRC | \
221 MLX5_FLOW_ACTION_SET_MAC_DST | \
222 MLX5_FLOW_ACTION_INC_TCP_SEQ | \
223 MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
224 MLX5_FLOW_ACTION_INC_TCP_ACK | \
225 MLX5_FLOW_ACTION_DEC_TCP_ACK | \
226 MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
227 MLX5_FLOW_ACTION_SET_TAG | \
228 MLX5_FLOW_ACTION_MARK_EXT | \
229 MLX5_FLOW_ACTION_SET_META | \
230 MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
231 MLX5_FLOW_ACTION_SET_IPV6_DSCP)
233 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
234 MLX5_FLOW_ACTION_OF_PUSH_VLAN)
236 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
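/*
 * Usage sketch (illustrative only, not part of this header): validation code
 * collects the detected MLX5_FLOW_ACTION_* bits into a 64-bit "action_flags"
 * word and checks it against the groups above, e.g.:
 *
 *	uint64_t action_flags = MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_COUNT;
 *
 *	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
 *		; // no fate action present, the flow should be rejected
 *	if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == MLX5_FLOW_XCAP_ACTIONS)
 *		; // both encap and decap requested in a single flow
 */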
239 #define IPPROTO_MPLS 137
242 /* UDP port number for MPLS */
243 #define MLX5_UDP_PORT_MPLS 6635
245 /* UDP port numbers for VxLAN. */
246 #define MLX5_UDP_PORT_VXLAN 4789
247 #define MLX5_UDP_PORT_VXLAN_GPE 4790
249 /* UDP port numbers for GENEVE. */
250 #define MLX5_UDP_PORT_GENEVE 6081
252 /* Priority reserved for default flows. */
253 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
256 * Number of sub priorities.
257 * For each kind of pattern matching, i.e. L2, L3 and L4, to get correct
258 * matching on the NIC (firmware dependent), L4 must have the highest
259 * priority, followed by L3 and finally L2.
261 #define MLX5_PRIORITY_MAP_L2 2
262 #define MLX5_PRIORITY_MAP_L3 1
263 #define MLX5_PRIORITY_MAP_L4 0
264 #define MLX5_PRIORITY_MAP_MAX 3
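/*
 * Illustrative only: the sub-priority picked from the map above is combined
 * with the rte_flow attribute priority through mlx5_flow_adjust_priority()
 * (declared later in this header), e.g. for a flow whose deepest matched
 * layer is L4:
 *
 *	priority = mlx5_flow_adjust_priority(dev, attr->priority,
 *					     MLX5_PRIORITY_MAP_L4);
 */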
266 /* Valid layer type for IPV4 RSS. */
267 #define MLX5_IPV4_LAYER_TYPES \
268 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
269 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
270 ETH_RSS_NONFRAG_IPV4_OTHER)
272 /* IBV hash source bits for IPV4. */
273 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
275 /* Valid layer type for IPV6 RSS. */
276 #define MLX5_IPV6_LAYER_TYPES \
277 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
278 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
279 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
281 /* IBV hash source bits for IPV6. */
282 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
284 /* IBV hash bits for L3 SRC. */
285 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
287 /* IBV hash bits for L3 DST. */
288 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
290 /* IBV hash bits for TCP. */
291 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
292 IBV_RX_HASH_DST_PORT_TCP)
294 /* IBV hash bits for UDP. */
295 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
296 IBV_RX_HASH_DST_PORT_UDP)
298 /* IBV hash bits for L4 SRC. */
299 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
300 IBV_RX_HASH_SRC_PORT_UDP)
302 /* IBV hash bits for L4 DST. */
303 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
304 IBV_RX_HASH_DST_PORT_UDP)
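/*
 * Illustrative only: a Verbs Rx hash configuration is built by OR-ing the
 * groups above according to the requested RSS types, e.g. an IPv4/TCP flow
 * would typically use:
 *
 *	uint64_t hash_fields = MLX5_IPV4_IBV_RX_HASH | MLX5_TCP_IBV_RX_HASH;
 */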
306 /* Geneve header, first 16 bits. */
307 #define MLX5_GENEVE_VER_MASK 0x3
308 #define MLX5_GENEVE_VER_SHIFT 14
309 #define MLX5_GENEVE_VER_VAL(a) \
310 (((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
311 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
312 #define MLX5_GENEVE_OPTLEN_SHIFT 7
313 #define MLX5_GENEVE_OPTLEN_VAL(a) \
314 (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
315 #define MLX5_GENEVE_OAMF_MASK 0x1
316 #define MLX5_GENEVE_OAMF_SHIFT 7
317 #define MLX5_GENEVE_OAMF_VAL(a) \
318 (((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
319 #define MLX5_GENEVE_CRITO_MASK 0x1
320 #define MLX5_GENEVE_CRITO_SHIFT 6
321 #define MLX5_GENEVE_CRITO_VAL(a) \
322 (((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
323 #define MLX5_GENEVE_RSVD_MASK 0x3F
324 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
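/*
 * Illustrative only: these accessors expect the first 16 bits of the Geneve
 * header converted to CPU byte order, e.g. using the ver_opt_len_o_c_rsvd0
 * field of struct rte_flow_item_geneve:
 *
 *	uint16_t hdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
 *	uint8_t ver = MLX5_GENEVE_VER_VAL(hdr);
 *	uint8_t opt_len = MLX5_GENEVE_OPTLEN_VAL(hdr);
 */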
326 * The length of the Geneve options fields, expressed in four byte multiples,
327 * not including the eight byte fixed tunnel header.
329 #define MLX5_GENEVE_OPT_LEN_0 14
330 #define MLX5_GENEVE_OPT_LEN_1 63
332 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
333 sizeof(struct rte_flow_item_ipv4))
335 enum mlx5_flow_drv_type {
338 MLX5_FLOW_TYPE_VERBS,
342 /* Matcher PRM representation */
343 struct mlx5_flow_dv_match_params {
345 /**< Size of match value. Do NOT split size and key! */
346 uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
347 /**< Matcher value. This value is used as the mask or as a key. */
350 /* Matcher structure. */
351 struct mlx5_flow_dv_matcher {
352 LIST_ENTRY(mlx5_flow_dv_matcher) next;
353 /**< Pointer to the next element. */
354 struct mlx5_flow_tbl_resource *tbl;
355 /**< Pointer to the table (group) the matcher is associated with. */
356 rte_atomic32_t refcnt; /**< Reference counter. */
357 void *matcher_object; /**< Pointer to DV matcher */
358 uint16_t crc; /**< CRC of key. */
359 uint16_t priority; /**< Priority of matcher. */
360 struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
363 #define MLX5_ENCAP_MAX_LEN 132
365 /* Encap/decap resource structure. */
366 struct mlx5_flow_dv_encap_decap_resource {
367 ILIST_ENTRY(uint32_t)next;
368 /* Pointer to next element. */
369 rte_atomic32_t refcnt; /**< Reference counter. */
371 /**< Verbs encap/decap action object. */
372 uint8_t buf[MLX5_ENCAP_MAX_LEN];
374 uint8_t reformat_type;
376 uint64_t flags; /**< Flags for RDMA API. */
379 /* Tag resource structure. */
380 struct mlx5_flow_dv_tag_resource {
381 struct mlx5_hlist_entry entry;
382 /**< Hash list entry for tag resource, tag value as the key. */
384 /**< Verbs tag action object. */
385 rte_atomic32_t refcnt; /**< Reference counter. */
386 uint32_t idx; /**< Index for the index memory pool. */
390 * Number of modification commands.
391 * If extensive metadata registers are supported, the maximal actions amount is
392 * 16 and 8 otherwise on root table. The validation could also be done in the
393 * lower driver layer.
394 * On non-root table, there is no limitation, but 32 is enough right now.
396 #define MLX5_MAX_MODIFY_NUM 32
397 #define MLX5_ROOT_TBL_MODIFY_NUM 16
398 #define MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG 8
400 /* Modify resource structure */
401 struct mlx5_flow_dv_modify_hdr_resource {
402 LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
403 /* Pointer to next element. */
404 rte_atomic32_t refcnt; /**< Reference counter. */
405 struct ibv_flow_action *verbs_action;
406 /**< Verbs modify header action object. */
407 uint8_t ft_type; /**< Flow table type, Rx or Tx. */
408 uint32_t actions_num; /**< Number of modification actions. */
409 uint64_t flags; /**< Flags for RDMA API. */
410 struct mlx5_modification_cmd actions[];
411 /**< Modification actions. */
414 /* Jump action resource structure. */
415 struct mlx5_flow_dv_jump_tbl_resource {
416 rte_atomic32_t refcnt; /**< Reference counter. */
417 uint8_t ft_type; /**< Flow table type, Rx or Tx. */
418 void *action; /**< Pointer to the rdma core action. */
421 /* Port ID resource structure. */
422 struct mlx5_flow_dv_port_id_action_resource {
423 LIST_ENTRY(mlx5_flow_dv_port_id_action_resource) next;
424 /* Pointer to next element. */
425 rte_atomic32_t refcnt; /**< Reference counter. */
427 /**< Port ID action object. */
428 uint32_t port_id; /**< Port ID value. */
431 /* Push VLAN action resource structure */
432 struct mlx5_flow_dv_push_vlan_action_resource {
433 ILIST_ENTRY(uint32_t)next;
434 /* Pointer to next element. */
435 rte_atomic32_t refcnt; /**< Reference counter. */
436 void *action; /**< Direct verbs action object. */
437 uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
438 rte_be32_t vlan_tag; /**< VLAN tag value. */
441 /* Metadata register copy table entry. */
442 struct mlx5_flow_mreg_copy_resource {
444 * Hash list entry for copy table.
445 * - Key is 32/64-bit MARK action ID.
446 * - MUST be the first entry.
448 struct mlx5_hlist_entry hlist_ent;
449 LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
450 /* List entry for device flows. */
451 uint32_t refcnt; /* Reference counter. */
452 uint32_t appcnt; /* Apply/Remove counter. */
453 struct rte_flow *flow; /* Built flow for copy. */
456 /* Table data structure of the hash organization. */
457 struct mlx5_flow_tbl_data_entry {
458 struct mlx5_hlist_entry entry;
459 /**< Hash list entry, 64-bit key inside. */
460 struct mlx5_flow_tbl_resource tbl;
461 /**< flow table resource. */
462 LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
463 /**< List head of the matchers associated with the flow table. */
464 struct mlx5_flow_dv_jump_tbl_resource jump;
465 /**< jump resource, at most one for each table created. */
468 /* Verbs specification header. */
469 struct ibv_spec_header {
470 enum ibv_flow_spec_type type;
474 struct mlx5_flow_rss {
476 uint32_t queue_num; /**< Number of entries in @p queue. */
477 uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
478 uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
479 uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
482 /** Device flow handle structure for DV mode only. */
483 struct mlx5_flow_handle_dv {
485 struct mlx5_flow_dv_matcher *matcher; /**< Pointer to the cached matcher. */
486 uint32_t encap_decap;
487 /**< Index to encap/decap resource in cache. */
488 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
489 /**< Pointer to modify header resource in cache. */
490 struct mlx5_flow_dv_jump_tbl_resource *jump;
491 /**< Pointer to the jump action resource. */
492 struct mlx5_flow_dv_port_id_action_resource *port_id_action;
493 /**< Pointer to port ID action resource. */
494 struct mlx5_vf_vlan vf_vlan;
495 /**< Structure for VF VLAN workaround. */
496 uint32_t push_vlan_res;
497 /**< Index to push VLAN action resource in cache. */
498 uint32_t tag_resource;
499 /**< Index to the tag action. */
502 /** Device flow handle structure: used both for creating & destroying. */
503 struct mlx5_flow_handle {
504 LIST_ENTRY(mlx5_flow_handle) next;
505 /**< Pointer to next device flow handle. */
507 /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
509 /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
510 void *ib_flow; /**< Verbs flow pointer. */
511 struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
512 struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
514 uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
515 uint32_t mtr_flow_id; /**< Unique meter match flow id. */
517 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
518 struct mlx5_flow_handle_dv dvh;
523 * Size of the Verbs device flow handle structure only. Do not use the
524 * DV-only structure in Verbs; no DV flow attributes will be accessed.
525 * Macro offsetof() could also be used here.
527 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
528 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
529 (sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
531 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
535 * Max number of actions per DV flow.
536 * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
537 * in rdma-core file providers/mlx5/verbs.c.
539 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
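/*
 * Illustrative only: translation code appends rdma-core action objects to
 * mlx5_flow_dv_workspace.actions (defined below) while keeping actions_n
 * within this bound, e.g.:
 *
 *	if (dev_flow->dv.actions_n >= MLX5_DV_MAX_NUMBER_OF_ACTIONS)
 *		; // too many actions for a single DV flow
 *	else
 *		dev_flow->dv.actions[dev_flow->dv.actions_n++] = action_object;
 */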
541 /** Device flow structure only for DV flow creation. */
542 struct mlx5_flow_dv_workspace {
543 uint32_t group; /**< The group index. */
544 uint8_t transfer; /**< 1 if the flow is an E-Switch flow. */
545 int actions_n; /**< Number of actions. */
546 void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
547 struct mlx5_flow_dv_encap_decap_resource *encap_decap;
548 /**< Pointer to encap/decap resource in cache. */
549 struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
550 /**< Pointer to push VLAN action resource in cache. */
551 struct mlx5_flow_dv_tag_resource *tag_resource;
552 /**< Pointer to the tag action. */
553 struct mlx5_flow_dv_match_params value;
554 /**< Holds the value that the packet is compared to. */
558 * Maximal Verbs flow specifications & actions size.
559 * Some elements are mutually exclusive, but enough space should be allocated.
560 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
561 * 2. One tunnel header (exception: GRE + MPLS),
562 * SPEC length: GRE == tunnel.
563 * Actions: 1. 1 Mark OR Flag.
564 * 2. 1 Drop (if any).
565 * 3. No limitation for counters, but it makes no sense to support too
566 * many counters in a single device flow.
568 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
569 #define MLX5_VERBS_MAX_SPEC_SIZE \
571 (2 * (sizeof(struct ibv_flow_spec_eth) + \
572 sizeof(struct ibv_flow_spec_ipv6) + \
573 sizeof(struct ibv_flow_spec_tcp_udp)) + \
574 sizeof(struct ibv_flow_spec_gre) + \
575 sizeof(struct ibv_flow_spec_mpls)) \
578 #define MLX5_VERBS_MAX_SPEC_SIZE \
580 (2 * (sizeof(struct ibv_flow_spec_eth) + \
581 sizeof(struct ibv_flow_spec_ipv6) + \
582 sizeof(struct ibv_flow_spec_tcp_udp)) + \
583 sizeof(struct ibv_flow_spec_tunnel)) \
587 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
588 defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
589 #define MLX5_VERBS_MAX_ACT_SIZE \
591 sizeof(struct ibv_flow_spec_action_tag) + \
592 sizeof(struct ibv_flow_spec_action_drop) + \
593 sizeof(struct ibv_flow_spec_counter_action) * 4 \
596 #define MLX5_VERBS_MAX_ACT_SIZE \
598 sizeof(struct ibv_flow_spec_action_tag) + \
599 sizeof(struct ibv_flow_spec_action_drop) \
603 #define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
604 (MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
606 /** Device flow structure only for Verbs flow creation. */
607 struct mlx5_flow_verbs_workspace {
608 unsigned int size; /**< Size of the attribute. */
609 struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
610 uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
611 /**< Specifications & actions buffer of verbs flow. */
614 /** Maximal number of device sub-flows supported. */
615 #define MLX5_NUM_MAX_DEV_FLOWS 32
617 /** Device flow structure. */
619 struct rte_flow *flow; /**< Pointer to the main flow. */
620 uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
621 bool external; /**< true if the flow is created externally to the PMD. */
622 uint8_t ingress; /**< 1 if the flow is ingress. */
624 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
625 struct mlx5_flow_dv_workspace dv;
627 struct mlx5_flow_verbs_workspace verbs;
629 struct mlx5_flow_handle *handle;
632 /* Flow meter state. */
633 #define MLX5_FLOW_METER_DISABLE 0
634 #define MLX5_FLOW_METER_ENABLE 1
636 #define MLX5_MAN_WIDTH 8
637 /* Modify this value if enum rte_mtr_color changes. */
638 #define RTE_MTR_DROPPED RTE_COLORS
640 /* Meter policer statistics */
641 struct mlx5_flow_policer_stats {
642 uint32_t cnt[RTE_COLORS + 1];
643 /**< Color counter, extra for drop. */
645 /**< Statistics mask for the colors. */
648 /* Meter table structure. */
649 struct mlx5_meter_domain_info {
650 struct mlx5_flow_tbl_resource *tbl;
652 struct mlx5_flow_tbl_resource *sfx_tbl;
653 /**< Meter suffix table. */
655 /**< Default matcher for the meter color no-match case. */
657 /**< Meter color match criteria. */
659 /**< Meter match action. */
660 void *policer_rules[RTE_MTR_DROPPED + 1];
661 /**< Meter policer for the match. */
664 /* Meter table set for TX RX FDB. */
665 struct mlx5_meter_domains_infos {
667 /**< Table user count. */
668 struct mlx5_meter_domain_info egress;
669 /**< TX meter table. */
670 struct mlx5_meter_domain_info ingress;
671 /**< RX meter table. */
672 struct mlx5_meter_domain_info transfer;
673 /**< FDB meter table. */
675 /**< Drop action for not matched packets. */
676 void *count_actns[RTE_MTR_DROPPED + 1];
677 /**< Counters for matched and unmatched statistics. */
678 uint32_t fmp[MLX5_ST_SZ_DW(flow_meter_parameters)];
679 /**< Flow meter parameter. */
681 /**< Flow meter parameter size. */
683 /**< Flow meter action. */
686 /* Meter parameter structure. */
687 struct mlx5_flow_meter {
688 TAILQ_ENTRY(mlx5_flow_meter) next;
689 /**< Pointer to the next flow meter structure. */
692 struct rte_mtr_params params;
693 /**< Meter rule parameters. */
694 struct mlx5_flow_meter_profile *profile;
695 /**< Meter profile parameters. */
696 struct rte_flow_attr attr;
697 /**< Flow attributes. */
698 struct mlx5_meter_domains_infos *mfts;
699 /**< Flow table created for this meter. */
700 struct mlx5_flow_policer_stats policer_stats;
701 /**< Meter policer statistics. */
704 uint32_t active_state:1;
707 /**< Meter shared or not. */
710 /* RFC2697 parameter structure. */
711 struct mlx5_flow_meter_srtcm_rfc2697_prm {
712 /* green_saturation_value = cbs_mantissa * 2^cbs_exponent */
713 uint32_t cbs_exponent:5;
714 uint32_t cbs_mantissa:8;
715 /* cir = 8G * cir_mantissa * 1/(2^cir_exponent) Bytes/Sec */
716 uint32_t cir_exponent:5;
717 uint32_t cir_mantissa:8;
718 /* yellow_saturation_value = ebs_mantissa * 2^ebs_exponent */
719 uint32_t ebs_exponent:5;
720 uint32_t ebs_mantissa:8;
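/*
 * Illustrative numeric example (assuming "8G" in the cir formula above
 * denotes 8 Gbit, i.e. 10^9 bytes): cir_mantissa = 1 with cir_exponent = 3
 * encodes cir = 10^9 * 1 / 2^3 = 125 MB/s, i.e. about 1 Gbit/s.
 */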
723 /* Flow meter profile structure. */
724 struct mlx5_flow_meter_profile {
725 TAILQ_ENTRY(mlx5_flow_meter_profile) next;
726 /**< Pointer to the next flow meter profile structure. */
727 uint32_t meter_profile_id; /**< Profile id. */
728 struct rte_mtr_meter_profile profile; /**< Profile detail. */
730 struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
731 /**< srtcm_rfc2697 struct. */
733 uint32_t ref_cnt; /**< Use count. */
736 /* Flow structure. */
738 TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
739 enum mlx5_flow_drv_type drv_type; /**< Driver type. */
740 struct mlx5_flow_rss rss; /**< RSS context. */
741 uint32_t counter; /**< Holds flow counter. */
742 struct mlx5_flow_mreg_copy_resource *mreg_copy;
743 /**< Pointer to the metadata register copy table resource. */
744 struct mlx5_flow_meter *meter; /**< Holds flow meter. */
745 LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
746 /**< Device flow handles that are part of the flow. */
747 struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
748 uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
749 uint32_t copy_applied:1; /**< The MARK copy flow is applied. */
752 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
753 const struct rte_flow_attr *attr,
754 const struct rte_flow_item items[],
755 const struct rte_flow_action actions[],
757 struct rte_flow_error *error);
758 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
759 (struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
760 const struct rte_flow_item items[],
761 const struct rte_flow_action actions[], struct rte_flow_error *error);
762 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
763 struct mlx5_flow *dev_flow,
764 const struct rte_flow_attr *attr,
765 const struct rte_flow_item items[],
766 const struct rte_flow_action actions[],
767 struct rte_flow_error *error);
768 typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
769 struct rte_flow_error *error);
770 typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
771 struct rte_flow *flow);
772 typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
773 struct rte_flow *flow);
774 typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
775 struct rte_flow *flow,
776 const struct rte_flow_action *actions,
778 struct rte_flow_error *error);
779 typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
780 (struct rte_eth_dev *dev,
781 const struct mlx5_flow_meter *fm);
782 typedef int (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
783 struct mlx5_meter_domains_infos *tbls);
784 typedef int (*mlx5_flow_create_policer_rules_t)
785 (struct rte_eth_dev *dev,
786 struct mlx5_flow_meter *fm,
787 const struct rte_flow_attr *attr);
788 typedef int (*mlx5_flow_destroy_policer_rules_t)
789 (struct rte_eth_dev *dev,
790 const struct mlx5_flow_meter *fm,
791 const struct rte_flow_attr *attr);
792 typedef uint32_t (*mlx5_flow_counter_alloc_t)
793 (struct rte_eth_dev *dev);
794 typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
796 typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
798 bool clear, uint64_t *pkts,
800 struct mlx5_flow_driver_ops {
801 mlx5_flow_validate_t validate;
802 mlx5_flow_prepare_t prepare;
803 mlx5_flow_translate_t translate;
804 mlx5_flow_apply_t apply;
805 mlx5_flow_remove_t remove;
806 mlx5_flow_destroy_t destroy;
807 mlx5_flow_query_t query;
808 mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
809 mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
810 mlx5_flow_create_policer_rules_t create_policer_rules;
811 mlx5_flow_destroy_policer_rules_t destroy_policer_rules;
812 mlx5_flow_counter_alloc_t counter_alloc;
813 mlx5_flow_counter_free_t counter_free;
814 mlx5_flow_counter_query_t counter_query;
818 #define MLX5_CNT_CONTAINER(sh, batch, thread) (&(sh)->cmng.ccont \
819 [(((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
820 #define MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
821 [(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
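/*
 * Illustrative only: each flow engine (DV, Verbs) provides one struct
 * mlx5_flow_driver_ops table filled with its callbacks; the initializer
 * below is a hypothetical sketch with made-up callback names, not a
 * definition from this header:
 *
 *	static const struct mlx5_flow_driver_ops flow_engine_drv_ops = {
 *		.validate = flow_engine_validate,
 *		.prepare = flow_engine_prepare,
 *		.translate = flow_engine_translate,
 *		.apply = flow_engine_apply,
 *		.remove = flow_engine_remove,
 *		.destroy = flow_engine_destroy,
 *	};
 */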
825 struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
826 void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
827 uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
828 uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
830 int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
831 bool external, uint32_t group, bool fdb_def_rule,
832 uint32_t *table, struct rte_flow_error *error);
833 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
834 uint64_t layer_types,
835 uint64_t hash_fields);
836 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
837 uint32_t subpriority);
838 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
839 enum mlx5_feature_name feature,
841 struct rte_flow_error *error);
842 const struct rte_flow_action *mlx5_flow_find_action
843 (const struct rte_flow_action *actions,
844 enum rte_flow_action_type action);
845 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
846 const struct rte_flow_attr *attr,
847 struct rte_flow_error *error);
848 int mlx5_flow_validate_action_drop(uint64_t action_flags,
849 const struct rte_flow_attr *attr,
850 struct rte_flow_error *error);
851 int mlx5_flow_validate_action_flag(uint64_t action_flags,
852 const struct rte_flow_attr *attr,
853 struct rte_flow_error *error);
854 int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
855 uint64_t action_flags,
856 const struct rte_flow_attr *attr,
857 struct rte_flow_error *error);
858 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
859 uint64_t action_flags,
860 struct rte_eth_dev *dev,
861 const struct rte_flow_attr *attr,
862 struct rte_flow_error *error);
863 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
864 uint64_t action_flags,
865 struct rte_eth_dev *dev,
866 const struct rte_flow_attr *attr,
868 struct rte_flow_error *error);
869 int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
870 const struct rte_flow_attr *attributes,
871 struct rte_flow_error *error);
872 int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
874 const uint8_t *nic_mask,
876 struct rte_flow_error *error);
877 int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
879 struct rte_flow_error *error);
880 int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
882 uint8_t target_protocol,
883 struct rte_flow_error *error);
884 int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
886 const struct rte_flow_item *gre_item,
887 struct rte_flow_error *error);
888 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
892 const struct rte_flow_item_ipv4 *acc_mask,
893 struct rte_flow_error *error);
894 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
898 const struct rte_flow_item_ipv6 *acc_mask,
899 struct rte_flow_error *error);
900 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
901 const struct rte_flow_item *item,
904 struct rte_flow_error *error);
905 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
907 uint8_t target_protocol,
908 const struct rte_flow_item_tcp *flow_mask,
909 struct rte_flow_error *error);
910 int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
912 uint8_t target_protocol,
913 struct rte_flow_error *error);
914 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
916 struct rte_eth_dev *dev,
917 struct rte_flow_error *error);
918 int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
920 struct rte_flow_error *error);
921 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
923 struct rte_eth_dev *dev,
924 struct rte_flow_error *error);
925 int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
927 uint8_t target_protocol,
928 struct rte_flow_error *error);
929 int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
931 uint8_t target_protocol,
932 struct rte_flow_error *error);
933 int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
935 uint8_t target_protocol,
936 struct rte_flow_error *error);
937 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
939 struct rte_eth_dev *dev,
940 struct rte_flow_error *error);
941 struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
942 (struct rte_eth_dev *dev,
943 const struct mlx5_flow_meter *fm);
944 int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
945 struct mlx5_meter_domains_infos *tbl);
946 int mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
947 struct mlx5_flow_meter *fm,
948 const struct rte_flow_attr *attr);
949 int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
950 struct mlx5_flow_meter *fm,
951 const struct rte_flow_attr *attr);
952 int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
953 struct rte_mtr_error *error);
954 #endif /* RTE_PMD_MLX5_FLOW_H_ */