/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"

/* Dev ops structures defined in mlx5.c. */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/* Pattern outer layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

/* Pattern inner layer bits. */
#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)

/* Pattern tunnel layer bits. */
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)

/* Outer layer masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
#define MLX5_FLOW_LAYER_OUTER \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
	 MLX5_FLOW_LAYER_OUTER_L4)

/* Tunnel masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)

/* Inner layer masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_INNER_L4 \
	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
#define MLX5_FLOW_LAYER_INNER \
	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
	 MLX5_FLOW_LAYER_INNER_L4)
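
/*
 * Illustrative note (not from the original sources): parsing the pattern
 * "eth / ipv4 / udp / vxlan / eth" accumulates
 * MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_VXLAN |
 * MLX5_FLOW_LAYER_INNER_L2 in flow->layers, so a non-zero
 * (flow->layers & MLX5_FLOW_LAYER_TUNNEL) tells the item converters below
 * to emit inner specifications.
 */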

/* Actions that modify the fate of matching traffic. */
#define MLX5_FLOW_FATE_DROP (1u << 0)
#define MLX5_FLOW_FATE_QUEUE (1u << 1)
#define MLX5_FLOW_FATE_RSS (1u << 2)

/* Modify a packet. */
#define MLX5_FLOW_MOD_FLAG (1u << 0)
#define MLX5_FLOW_MOD_MARK (1u << 1)
#define MLX5_FLOW_MOD_COUNT (1u << 2)

/* Actions. */
#define MLX5_FLOW_ACTION_DROP (1u << 0)
#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
#define MLX5_FLOW_ACTION_RSS (1u << 2)
#define MLX5_FLOW_ACTION_FLAG (1u << 3)
#define MLX5_FLOW_ACTION_MARK (1u << 4)
#define MLX5_FLOW_ACTION_COUNT (1u << 5)

#define MLX5_FLOW_FATE_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)

/* Possible L3 protocols to filter on. */
#define MLX5_IP_PROTOCOL_TCP 6
#define MLX5_IP_PROTOCOL_UDP 17
#define MLX5_IP_PROTOCOL_GRE 47
#define MLX5_IP_PROTOCOL_MPLS 147

/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
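
/*
 * Illustrative note (not from the original sources): expanding the pattern
 * "eth / ipv4 / end" against this graph with rss_types containing
 * ETH_RSS_NONFRAG_IPV4_UDP produces the original pattern plus
 * "eth / ipv4 / udp / end", so each expanded flow can hash on the fields
 * its last layer supports.
 */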

/* Verbs flow description: attribute and specification buffers. */
struct mlx5_flow_verbs {
	LIST_ENTRY(mlx5_flow_verbs) next;
	unsigned int size; /**< Size of the attribute. */
	struct ibv_flow_attr *attr; /**< Pointer to the Verbs flow attribute. */
	uint8_t *specs; /**< Pointer to the specifications. */
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
};

/* Counters information. */
struct mlx5_flow_counter {
	LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
	uint32_t shared:1; /**< Share counter ID with other flow rules. */
	uint32_t ref_cnt:31; /**< Reference counter. */
	uint32_t id; /**< Counter ID. */
	struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
	uint64_t hits; /**< Number of packets matched by the rule. */
	uint64_t bytes; /**< Number of bytes matched by the rule. */
};

/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct rte_flow_attr attributes; /**< User flow attribute. */
	uint32_t layers;
	/**< Bit-field of present layers, see MLX5_FLOW_LAYER_*. */
	uint32_t modifier;
	/**< Bit-field of present modifiers, see MLX5_FLOW_MOD_*. */
	uint32_t fate;
	/**< Bit-field of present fate actions, see MLX5_FLOW_FATE_*. */
	LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
	struct mlx5_flow_verbs *cur_verbs;
	/**< Current Verbs flow structure being filled. */
	struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
	struct rte_flow_action_rss rss; /**< RSS context. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
	void *nl_flow; /**< Netlink flow buffer if relevant. */
};
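
/*
 * Illustrative note (assumption drawn from the conversion code below): a
 * fully converted flow lives in one contiguous allocation laid out as
 *
 *   rte_flow | queues[] | { mlx5_flow_verbs | ibv_flow_attr | specs } ...
 *
 * flow->queue points right after the structure and each expanded Verbs
 * flow appends its own verbs/attribute/specification block at the tail.
 */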

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue;
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/*
 * Number of sub-priorities.
 * For each kind of pattern matching (i.e. L2, L3, L4), a correct match on
 * the NIC (firmware dependent) requires L4 to have the highest priority,
 * followed by L3 and finally L2.
 */
#define MLX5_PRIORITY_MAP_L2 2
#define MLX5_PRIORITY_MAP_L3 1
#define MLX5_PRIORITY_MAP_L4 0
#define MLX5_PRIORITY_MAP_MAX 3

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
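
/*
 * Illustrative note (not from the original sources): with 16 Verbs
 * priorities, a rule at rte_flow priority 1 whose deepest converted layer
 * is L3 ends up at Verbs priority priority_map_5[1][MLX5_PRIORITY_MAP_L3],
 * i.e. 4, keeping L4 matches of the same rule priority ahead of L3 and L2.
 */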

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		mlx5_hrxq_drop_release(dev);
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, vprio[i]);
		return -rte_errno;
	}
	mlx5_hrxq_drop_release(dev);
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to an rte flow.
 */
static void
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t priority = flow->attributes.priority;
	uint32_t subpriority = flow->cur_verbs->attr->priority;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		priority = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		priority = priority_map_5[priority][subpriority];
		break;
	}
	flow->cur_verbs->attr->priority = priority;
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   A pointer to the counter, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;

	LIST_FOREACH(cnt, &priv->flow_counters, next) {
		if (!cnt->shared || cnt->shared != shared)
			continue;
		if (cnt->id != id)
			continue;
		cnt->ref_cnt++;
		return cnt;
	}
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	{
		struct mlx5_flow_counter tmpl = {
			.shared = shared,
			.ref_cnt = 1,
			.id = id,
			.cs = mlx5_glue->create_counter_set
				(priv->ctx,
				 &(struct ibv_counter_set_init_attr){
					.counter_set_id = id,
				 }),
			.hits = 0,
			.bytes = 0,
		};

		if (!tmpl.cs) {
			rte_errno = errno;
			return NULL;
		}
		cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
		if (!cnt) {
			rte_errno = ENOMEM;
			return NULL;
		}
		*cnt = tmpl;
		LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
		return cnt;
	}
#endif
	rte_errno = ENOTSUP;
	return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
{
	if (--counter->ref_cnt == 0) {
		claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
		LIST_REMOVE(counter, next);
		rte_free(counter);
	}
}
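
/*
 * Illustrative note (not from the original sources): two rules created with
 * a shared COUNT action and the same counter ID reuse one ibv_counter_set;
 * mlx5_flow_counter_new() bumps ref_cnt for the second rule and the set is
 * only destroyed here once both rules have been released.
 */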

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_attributes(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attributes,
		     struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	flow->attributes = *attributes;
	if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
		flow->attributes.priority = priority_max;
	return 0;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not supported");
	}
	return 0;
}

/**
 * Add a Verbs item specification into @p flow.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] src
 *   Specification to copy into the buffer.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
{
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;

	if (verbs->specs) {
		void *dst;

		dst = (void *)(verbs->specs + verbs->size);
		memcpy(dst, src, size);
		++verbs->attr->num_of_specs;
	}
	verbs->size += size;
}

/**
 * Adjust Verbs hash fields according to the @p flow information.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 */
static void
mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
				  int tunnel __rte_unused,
				  uint32_t layer_types, uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
	if (flow->rss.level == 2 && !tunnel)
		hash_fields = 0;
	else if (flow->rss.level < 2 && tunnel)
		hash_fields = 0;
#endif
	if (!(flow->rss.types & layer_types))
		hash_fields = 0;
	flow->cur_verbs->hash_fields |= hash_fields;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
		MLX5_FLOW_LAYER_OUTER_L2;
	if (size > flow_size)
		return size;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	mlx5_flow_spec_verbs_add(flow, &eth, size);
	return size;
}
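
/*
 * Illustrative note (not from the original sources) on the two-call sizing
 * convention shared by every converter in this file:
 *
 *	int size = mlx5_flow_item_eth(item, flow, 0);
 *	// size now holds the bytes required; grow the flow buffer
 *	// accordingly, then call again to actually write the specification:
 *	size = mlx5_flow_item_eth(item, flow, available_size);
 */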

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
			   struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into @p flow (or by updating the already present
 * Ethernet Verbs specification) after ensuring the NIC will understand and
 * process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
		MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(flow->layers & l2m)) {
		if (size <= flow_size) {
			flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
			mlx5_flow_spec_verbs_add(flow, &eth, size);
		}
	} else {
		if (flow->cur_verbs)
			mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
						   &eth);
		size = 0; /* Only an update is done in the eth specification. */
	}
	flow->layers |= tunnel ?
		(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
		(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT |
			(tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
		MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	if (size <= flow_size) {
		mlx5_flow_verbs_hashfields_adjust
			(flow, tunnel,
			 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			  ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_NONFRAG_IPV4_UDP |
			  ETH_RSS_NONFRAG_IPV4_OTHER),
			 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
		mlx5_flow_spec_verbs_add(flow, &ipv4, size);
	}
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
		MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
					 IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
					 IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.val.hop_limit = spec->hdr.hop_limits;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
					 IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
					  IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		ipv6.mask.hop_limit = mask->hdr.hop_limits;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
	}
	if (size <= flow_size) {
		mlx5_flow_verbs_hashfields_adjust
			(flow, tunnel,
			 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			  ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP |
			  ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX |
			  ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX),
			 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
		mlx5_flow_spec_verbs_add(flow, &ipv6, size);
	}
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
		MLX5_FLOW_LAYER_OUTER_L4_UDP;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	if (size <= flow_size) {
		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
						  (IBV_RX_HASH_SRC_PORT_UDP |
						   IBV_RX_HASH_DST_PORT_UDP));
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
		mlx5_flow_spec_verbs_add(flow, &udp, size);
	}
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
		MLX5_FLOW_LAYER_OUTER_L4_TCP;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	if (size <= flow_size) {
		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
						  (IBV_RX_HASH_SRC_PORT_TCP |
						   IBV_RX_HASH_DST_PORT_TCP));
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
		mlx5_flow_spec_verbs_add(flow, &tcp, size);
	}
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
		     const size_t flow_size)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	if (size <= flow_size) {
		mlx5_flow_spec_verbs_add(flow, &vxlan, size);
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	}
	flow->layers |= MLX5_FLOW_LAYER_VXLAN;
	return size;
}
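
/*
 * Illustrative note (not from the original sources): the VNI union above
 * converts the 24-bit big-endian VNI into the 32-bit tunnel_id Verbs
 * expects; copying the three VNI bytes into vni[1..3] of the zeroed union
 * turns e.g. VNI 0x123456 into the network-order bytes
 * { 0x00, 0x12, 0x34, 0x56 }.
 */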

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_vxlan_gpe(const struct rte_flow_item *item,
			 struct rte_flow *flow, const size_t flow_size)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	if (size <= flow_size) {
		mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	}
	flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
	return size;
}

/**
 * Update the protocol in the Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				      enum ibv_flow_spec_type search,
				      uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * It will also update the previous L3 layer with the protocol value matching
 * the GRE tunnel.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_item_gre(const struct rte_flow_item *item __rte_unused,
		   struct rte_flow *flow, const size_t flow_size)
{
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};
#else
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#endif

#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif /* HAVE_IBV_DEVICE_MPLS_SUPPORT */
	if (size <= flow_size) {
		if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			mlx5_flow_item_gre_ip_protocol_update
				(verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
				 MLX5_IP_PROTOCOL_GRE);
		else
			mlx5_flow_item_gre_ip_protocol_update
				(verbs->attr, IBV_FLOW_SPEC_IPV6,
				 MLX5_IP_PROTOCOL_GRE);
		mlx5_flow_spec_verbs_add(flow, &tunnel, size);
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	}
	flow->layers |= MLX5_FLOW_LAYER_GRE;
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
		    struct rte_flow *flow __rte_unused,
		    const size_t flow_size __rte_unused,
		    struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	if (size <= flow_size) {
		mlx5_flow_spec_verbs_add(flow, &mpls, size);
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	}
	flow->layers |= MLX5_FLOW_LAYER_MPLS;
	return size;
#endif /* HAVE_IBV_DEVICE_MPLS_SUPPORT */
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_ITEM,
				  item,
				  "MPLS is not supported by Verbs, please"
				  " update.");
}

/**
 * Convert the @p pattern into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * The conversion is performed item per item, each of them is written into
 * the @p flow if its size is less than or equal to @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end of @p pattern, unless an error is encountered.
 *
 * @param[in] pattern
 *   Flow pattern.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p pattern has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_items(const struct rte_flow_item pattern[],
		struct rte_flow *flow, const size_t flow_size,
		struct rte_flow_error *error)
{
	int remain = flow_size;
	size_t size = 0;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		int ret = 0;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_item_eth(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_item_vlan(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_item_ipv4(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_item_ipv6(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_item_udp(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_item_tcp(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_item_vxlan(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_item_vxlan_gpe(pattern, flow,
						       remain);
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_item_gre(pattern, flow, remain);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  pattern,
						  "item not supported");
		}
		if (ret < 0)
			return ret;
		if (remain > ret)
			remain -= ret;
		else
			remain = 0;
		size += ret;
	}
	if (!flow->layers) {
		const struct rte_flow_item item = {
			.type = RTE_FLOW_ITEM_TYPE_ETH,
		};

		return mlx5_flow_item_eth(&item, flow, flow_size);
	}
	return size;
}
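
/*
 * Illustrative note (not from the original sources): a pattern made only of
 * VOID items leaves flow->layers empty, so the fallback above converts it
 * as if "eth / end" had been requested, i.e. a match on all traffic.
 */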

/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p action has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_action_drop(struct rte_flow *flow, const size_t flow_size)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &drop, size);
	flow->fate |= MLX5_FLOW_FATE_DROP;
	return size;
}

/**
 * Convert the @p action into @p flow after ensuring the NIC will understand
 * and process it correctly.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_action_queue(const struct rte_flow_action *action,
		       struct rte_flow *flow)
{
	const struct rte_flow_action_queue *queue = action->conf;

	if (flow->queue)
		(*flow->queue)[0] = queue->index;
	flow->rss.queue_num = 1;
	flow->fate |= MLX5_FLOW_FATE_QUEUE;
	return 0;
}

/**
 * Ensure the @p action will be understood and used correctly by the NIC.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_action_rss(const struct rte_flow_action *action,
		     struct rte_flow *flow)
{
	const struct rte_flow_action_rss *rss = action->conf;

	if (flow->queue)
		memcpy((*flow->queue), rss->queue,
		       rss->queue_num * sizeof(uint16_t));
	flow->rss.queue_num = rss->queue_num;
	memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
	flow->rss.types = rss->types;
	flow->rss.level = rss->level;
	flow->fate |= MLX5_FLOW_FATE_RSS;
	return 0;
}

/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p action has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_action_flag(struct rte_flow *flow, const size_t flow_size)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;

	if (flow->modifier & MLX5_FLOW_MOD_MARK)
		size = 0;
	else if (size <= flow_size && verbs)
		mlx5_flow_spec_verbs_add(flow, &tag, size);
	flow->modifier |= MLX5_FLOW_MOD_FLAG;
	return size;
}

/**
 * Update Verbs specification to modify the flag to mark.
 *
 * @param[in, out] verbs
 *   Pointer to the mlx5_flow_verbs structure.
 * @param[in] mark_id
 *   Mark identifier to replace the flag.
 */
static void
mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
{
	struct ibv_spec_header *hdr;
	int i;

	if (!verbs)
		return;
	/* Update Verbs specification. */
	hdr = (struct ibv_spec_header *)verbs->specs;
	if (!hdr)
		return;
	for (i = 0; i != verbs->attr->num_of_specs; ++i) {
		if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
			struct ibv_flow_spec_action_tag *t =
				(struct ibv_flow_spec_action_tag *)hdr;

			t->tag_id = mlx5_flow_mark_set(mark_id);
		}
		hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
	}
}

/**
 * Convert the @p action into @p flow (or by updating the already present
 * Flag Verbs specification) after ensuring the NIC will understand and
 * process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p action has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 */
static int
mlx5_flow_action_mark(const struct rte_flow_action *action,
		      struct rte_flow *flow, const size_t flow_size)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
	};
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;

	if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
		mlx5_flow_verbs_mark_update(verbs, mark->id);
		size = 0;
	} else if (size <= flow_size) {
		tag.tag_id = mlx5_flow_mark_set(mark->id);
		mlx5_flow_spec_verbs_add(flow, &tag, size);
	}
	flow->modifier |= MLX5_FLOW_MOD_MARK;
	return size;
}

/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p action has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_action_count(struct rte_eth_dev *dev,
		       const struct rte_flow_action *action,
		       struct rte_flow *flow,
		       const size_t flow_size __rte_unused,
		       struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = mlx5_flow_counter_new(dev, count->shared,
						      count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
	flow->modifier |= MLX5_FLOW_MOD_COUNT;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	counter.counter_set_handle = flow->counter->cs->handle;
	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &counter, size);
	return size;
#endif
	return 0;
}

/**
 * Convert the @p actions into @p flow after ensuring the NIC will understand
 * and process it correctly.
 * The conversion is performed action per action, each of them is written into
 * the @p flow if its size is less than or equal to @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end of @p actions, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] actions
 *   Pointer to flow actions array.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p actions has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_actions(struct rte_eth_dev *dev,
		  const struct rte_flow_action actions[],
		  struct rte_flow *flow, const size_t flow_size,
		  struct rte_flow_error *error)
{
	size_t size = 0;
	int remain = flow_size;
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_action_flag(flow, remain);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_action_mark(actions, flow, remain);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_action_drop(flow, remain);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_action_queue(actions, flow);
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_action_rss(actions, flow);
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_action_count(dev, actions, flow, remain,
						     error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
		if (remain > ret)
			remain -= ret;
		else
			remain = 0;
		size += ret;
	}
	if (!flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "no fate action found");
	return size;
}

/**
 * Validate flow rule and fill flow structure accordingly.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] flow
 *   Pointer to flow structure.
 * @param flow_size
 *   Size of allocated space for @p flow.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the size of the flow object in bytes
 *   regardless of @p flow_size on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_flow_merge_switch(struct rte_eth_dev *dev,
		       struct rte_flow *flow,
		       size_t flow_size,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
	uint16_t port_id[!n + n];
	struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
	size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
	unsigned int i;
	unsigned int own = 0;
	int ret;

	/* At least one port is needed when no switch domain is present. */
	if (!n) {
		n = 1;
		port_id[0] = dev->data->port_id;
	} else {
		n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
	}
	for (i = 0; i != n; ++i) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(port_id[i], &dev_info);
		if (port_id[i] == dev->data->port_id)
			own = i;
		ptoi[i].port_id = port_id[i];
		ptoi[i].ifindex = dev_info.if_index;
	}
	/* Ensure first entry of ptoi[] is the current device. */
	if (own) {
		ptoi[n] = ptoi[0];
		ptoi[0] = ptoi[own];
		ptoi[own] = ptoi[n];
	}
	/* An entry with zero ifindex terminates ptoi[]. */
	ptoi[n].port_id = 0;
	ptoi[n].ifindex = 0;
	if (flow_size < off)
		flow_size = 0;
	ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
				     flow_size ? flow_size - off : 0,
				     ptoi, attr, pattern, actions, error);
	if (ret < 0)
		return ret;
	if (flow_size) {
		*flow = (struct rte_flow){
			.attributes = *attr,
			.nl_flow = (uint8_t *)flow + off,
		};
		/*
		 * Generate a reasonably unique handle based on the address
		 * of the target buffer.
		 *
		 * This is straightforward on 32-bit systems where the flow
		 * pointer can be used directly. Otherwise, its least
		 * significant part is taken after shifting it by the
		 * previous power of two of the pointed buffer size.
		 */
		if (sizeof(flow) <= 4)
			mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
		else
			mlx5_nl_flow_brand
				(flow->nl_flow,
				 (uintptr_t)flow >>
				 rte_log2_u32(rte_align32prevpow2(flow_size)));
	}
	return off + ret;
}

/**
 * Look up the graph root to use for RSS expansion of @p pattern.
 *
 * @param[in] pattern
 *   Flow pattern.
 * @param[in] rss_level
 *   RSS level (2 requests hashing on inner headers).
 *
 * @return
 *   Graph root node index to start the expansion from.
 */
static uint32_t
mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
	const struct rte_flow_item *item;
	unsigned int has_vlan = 0;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			has_vlan = 1;
			break;
		}
	}
	if (has_vlan)
		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
			       MLX5_EXPANSION_ROOT_OUTER;
}
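
/*
 * Illustrative note (not from the original sources): for the pattern
 * "eth / vlan / ipv4 / end" with rss_level 1, the function returns
 * MLX5_EXPANSION_ROOT_ETH_VLAN so that the expansion graph keeps the VLAN
 * item in every pattern it generates.
 */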
2004 * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
2005 * after ensuring the NIC will understand and process it correctly.
2006 * The conversion is only performed item/action per item/action, each of
2007 * them is written into the @p flow if its size is lesser or equal to @p
2009 * Validation and memory consumption computation are still performed until the
2010 * end, unless an error is encountered.
2013 * Pointer to Ethernet device.
2014 * @param[in, out] flow
2015 * Pointer to flow structure.
2016 * @param[in] flow_size
2017 * Size in bytes of the available space in @p flow, if too small some
2018 * garbage may be present.
2019 * @param[in] attributes
2020 * Flow rule attributes.
2021 * @param[in] pattern
2022 * Pattern specification (list terminated by the END pattern item).
2023 * @param[in] actions
2024 * Associated actions (list terminated by the END action).
2026 * Perform verbose error reporting if not NULL.
2029 * On success, the number of bytes consumed/necessary. If the returned value
2030 * is less than or equal to @p flow_size, the flow has been fully converted and
2031 * can be applied; otherwise another call with the returned size as @p flow_size
2032 * is necessary.
2033 * On error, a negative errno value is returned and rte_errno is set.
2036 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
2037 const size_t flow_size,
2038 const struct rte_flow_attr *attributes,
2039 const struct rte_flow_item pattern[],
2040 const struct rte_flow_action actions[],
2041 struct rte_flow_error *error)
2043 struct rte_flow local_flow = { .layers = 0, };
2044 size_t size = sizeof(*flow);
2045 union {
2046 struct rte_flow_expand_rss buf;
2047 uint8_t buffer[2048];
2048 } expand_buffer;
2049 struct rte_flow_expand_rss *buf = &expand_buffer.buf;
2050 struct mlx5_flow_verbs *original_verbs = NULL;
2051 size_t original_verbs_size = 0;
2052 uint32_t original_layers = 0;
2053 int expanded_pattern_idx = 0;
2057 if (attributes->transfer)
2058 return mlx5_flow_merge_switch(dev, flow, flow_size,
2059 attributes, pattern,
2061 if (size > flow_size)
2063 ret = mlx5_flow_attributes(dev->data->dev_private, attributes, flow);
2066 ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
2069 if (local_flow.rss.types) {
2070 unsigned int graph_root;
2072 graph_root = mlx5_find_graph_root(pattern,
2073 local_flow.rss.level);
2074 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
2075 pattern, local_flow.rss.types,
2076 mlx5_support_expansion,
2079 (unsigned int)ret < sizeof(expand_buffer.buffer));
2082 buf->entry[0].pattern = (void *)(uintptr_t)pattern;
2084 size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
2086 if (size <= flow_size)
2087 flow->queue = (void *)(flow + 1);
2088 LIST_INIT(&flow->verbs);
2092 for (i = 0; i != buf->entries; ++i) {
2096 flow->layers = original_layers;
2097 size += sizeof(struct ibv_flow_attr) +
2098 sizeof(struct mlx5_flow_verbs);
2100 if (size < flow_size) {
2101 flow->cur_verbs = (void *)((uintptr_t)flow + off);
2102 flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
2103 flow->cur_verbs->specs =
2104 (void *)(flow->cur_verbs->attr + 1);
2106 /* First iteration convert the pattern into Verbs. */
2108 /* Actions don't need to be converted several times. */
2109 ret = mlx5_flow_actions(dev, actions, flow,
2110 (size < flow_size) ?
2111 flow_size - size : 0,
2118 * Next iteration means the pattern has already been
2119 * converted and an expansion is necessary to match
2120 * the user RSS request. For that, only the expanded
2121 * items will be converted; the part shared with the
2122 * user pattern is simply copied into the next buffer
2123 * zone.
2125 size += original_verbs_size;
2126 if (size < flow_size) {
2127 rte_memcpy(flow->cur_verbs->attr,
2128 original_verbs->attr,
2129 original_verbs_size +
2130 sizeof(struct ibv_flow_attr));
2131 flow->cur_verbs->size = original_verbs_size;
2134 ret = mlx5_flow_items
2135 ((const struct rte_flow_item *)
2136 &buf->entry[i].pattern[expanded_pattern_idx],
2138 (size < flow_size) ? flow_size - size : 0, error);
2142 if (size <= flow_size) {
2143 mlx5_flow_adjust_priority(dev, flow);
2144 LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
2147 * Keep a pointer to the first Verbs conversion and the layers
2148 * it has encountered.
2151 original_verbs = flow->cur_verbs;
2152 original_verbs_size = size - off2;
2153 original_layers = flow->layers;
2155 * Move the index of the expanded pattern to the
2156 * first item not yet addressed.
2158 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2159 expanded_pattern_idx++;
2161 const struct rte_flow_item *item = pattern;
2163 for (item = pattern;
2164 item->type != RTE_FLOW_ITEM_TYPE_END;
2166 expanded_pattern_idx++;
2170 /* Restore the origin layers in the flow. */
2171 flow->layers = original_layers;
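/*
 * Minimal usage sketch for the two-pass convention above (an
 * illustration mirroring mlx5_flow_list_create() later in this file,
 * not code from the original driver):
 *
 *    struct rte_flow_error err;
 *    int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, &err);
 *    if (ret < 0)
 *        return ret;                        // validation failed
 *    struct rte_flow *flow = rte_calloc(__func__, 1, ret, 0);
 *    ret = mlx5_flow_merge(dev, flow, ret, attr, items, actions, &err);
 */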
2176 * Look up and set the ptype in the Rx data part. A single ptype can be used;
2177 * if several tunnel rules are used on this queue, the tunnel ptype will be
2178 * cleared.
2181 * Rx queue to update.
2184 mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
2187 uint32_t tunnel_ptype = 0;
2189 /* Look up for the ptype to use. */
2190 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
2191 if (!rxq_ctrl->flow_tunnels_n[i])
2193 if (!tunnel_ptype) {
2194 tunnel_ptype = tunnels_info[i].ptype;
2200 rxq_ctrl->rxq.tunnel = tunnel_ptype;
2204 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
2207 * Pointer to Ethernet device.
2209 * Pointer to flow structure.
2212 mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
2214 struct priv *priv = dev->data->dev_private;
2215 const int mark = !!(flow->modifier &
2216 (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
2217 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
2220 for (i = 0; i != flow->rss.queue_num; ++i) {
2221 int idx = (*flow->queue)[i];
2222 struct mlx5_rxq_ctrl *rxq_ctrl =
2223 container_of((*priv->rxqs)[idx],
2224 struct mlx5_rxq_ctrl, rxq);
2227 rxq_ctrl->rxq.mark = 1;
2228 rxq_ctrl->flow_mark_n++;
2233 /* Increase the counter matching the flow. */
2234 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
2235 if ((tunnels_info[j].tunnel & flow->layers) ==
2236 tunnels_info[j].tunnel) {
2237 rxq_ctrl->flow_tunnels_n[j]++;
2241 mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
2247 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
2248 * @p flow if no other flow uses it with the same kind of request.
2251 * Pointer to Ethernet device.
2253 * Pointer to the flow.
2256 mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
2258 struct priv *priv = dev->data->dev_private;
2259 const int mark = !!(flow->modifier &
2260 (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
2261 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
2264 assert(dev->data->dev_started);
2265 for (i = 0; i != flow->rss.queue_num; ++i) {
2266 int idx = (*flow->queue)[i];
2267 struct mlx5_rxq_ctrl *rxq_ctrl =
2268 container_of((*priv->rxqs)[idx],
2269 struct mlx5_rxq_ctrl, rxq);
2272 rxq_ctrl->flow_mark_n--;
2273 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
2278 /* Decrease the counter matching the flow. */
2279 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
2280 if ((tunnels_info[j].tunnel & flow->layers) ==
2281 tunnels_info[j].tunnel) {
2282 rxq_ctrl->flow_tunnels_n[j]--;
2286 mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
2292 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
2295 * Pointer to Ethernet device.
2298 mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
2300 struct priv *priv = dev->data->dev_private;
2303 for (i = 0; i != priv->rxqs_n; ++i) {
2304 struct mlx5_rxq_ctrl *rxq_ctrl;
2307 if (!(*priv->rxqs)[i])
2309 rxq_ctrl = container_of((*priv->rxqs)[i],
2310 struct mlx5_rxq_ctrl, rxq);
2311 rxq_ctrl->flow_mark_n = 0;
2312 rxq_ctrl->rxq.mark = 0;
2313 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
2314 rxq_ctrl->flow_tunnels_n[j] = 0;
2315 rxq_ctrl->rxq.tunnel = 0;
2320 * Validate the flag action.
2322 * @param[in] action_flags
2323 * Bit-fields that hold the actions detected until now.
2325 * Pointer to error structure.
2328 * 0 on success, a negative errno value otherwise and rte_errno is set.
2331 mlx5_flow_validate_action_flag(uint64_t action_flags,
2332 struct rte_flow_error *error)
2335 if (action_flags & MLX5_FLOW_ACTION_DROP)
2336 return rte_flow_error_set(error, EINVAL,
2337 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2338 "can't drop and flag in same flow");
2339 if (action_flags & MLX5_FLOW_ACTION_MARK)
2340 return rte_flow_error_set(error, EINVAL,
2341 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2342 "can't mark and flag in same flow");
2343 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2344 return rte_flow_error_set(error, EINVAL,
2345 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2347 " actions in same flow");
2352 * Validate the mark action.
2355 * Pointer to the queue action.
2356 * @param[in] action_flags
2357 * Bit-fields that hold the actions detected until now.
2359 * Pointer to error structure.
2362 * 0 on success, a negative errno value otherwise and rte_errno is set.
2365 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
2366 uint64_t action_flags,
2367 struct rte_flow_error *error)
2369 const struct rte_flow_action_mark *mark = action->conf;
2372 return rte_flow_error_set(error, EINVAL,
2373 RTE_FLOW_ERROR_TYPE_ACTION,
2375 "configuration cannot be null");
2376 if (mark->id >= MLX5_FLOW_MARK_MAX)
2377 return rte_flow_error_set(error, EINVAL,
2378 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2380 "mark id must in 0 <= id < "
2381 RTE_STR(MLX5_FLOW_MARK_MAX));
2382 if (action_flags & MLX5_FLOW_ACTION_DROP)
2383 return rte_flow_error_set(error, EINVAL,
2384 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2385 "can't drop and mark in same flow");
2386 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2387 return rte_flow_error_set(error, EINVAL,
2388 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2389 "can't flag and mark in same flow");
2390 if (action_flags & MLX5_FLOW_ACTION_MARK)
2391 return rte_flow_error_set(error, EINVAL,
2392 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2393 "can't have 2 mark actions in same"
2399 * Validate the drop action.
2401 * @param[in] action_flags
2402 * Bit-fields that hold the actions detected until now.
2404 * Pointer to error structure.
2407 * 0 on success, a negative errno value otherwise and rte_errno is set.
2410 mlx5_flow_validate_action_drop(uint64_t action_flags,
2411 struct rte_flow_error *error)
2413 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2414 return rte_flow_error_set(error, EINVAL,
2415 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2416 "can't drop and flag in same flow");
2417 if (action_flags & MLX5_FLOW_ACTION_MARK)
2418 return rte_flow_error_set(error, EINVAL,
2419 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2420 "can't drop and mark in same flow");
2421 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2422 return rte_flow_error_set(error, EINVAL,
2423 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2424 "can't have 2 fate actions in"
2431 * Validate the queue action.
2434 * Pointer to the queue action.
2435 * @param[in] action_flags
2436 * Bit-fields that hold the actions detected until now.
2438 * Pointer to the Ethernet device structure.
2440 * Pointer to error structure.
2443 * 0 on success, a negative errno value otherwise and rte_errno is set.
2446 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
2447 uint64_t action_flags,
2448 struct rte_eth_dev *dev,
2449 struct rte_flow_error *error)
2451 struct priv *priv = dev->data->dev_private;
2452 const struct rte_flow_action_queue *queue = action->conf;
2454 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2455 return rte_flow_error_set(error, EINVAL,
2456 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2457 "can't have 2 fate actions in"
2459 if (queue->index >= priv->rxqs_n)
2460 return rte_flow_error_set(error, EINVAL,
2461 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2463 "queue index out of range");
2464 if (!(*priv->rxqs)[queue->index])
2465 return rte_flow_error_set(error, EINVAL,
2466 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2468 "queue is not configured");
2474 * Validate the rss action.
2477 * Pointer to the queue action.
2478 * @param[in] action_flags
2479 * Bit-fields that hold the actions detected until now.
2481 * Pointer to the Ethernet device structure.
2483 * Pointer to error structure.
2486 * 0 on success, a negative errno value otherwise and rte_errno is set.
2489 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
2490 uint64_t action_flags,
2491 struct rte_eth_dev *dev,
2492 struct rte_flow_error *error)
2494 struct priv *priv = dev->data->dev_private;
2495 const struct rte_flow_action_rss *rss = action->conf;
2498 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2499 return rte_flow_error_set(error, EINVAL,
2500 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2501 "can't have 2 fate actions"
2503 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
2504 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
2505 return rte_flow_error_set(error, ENOTSUP,
2506 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2508 "RSS hash function not supported");
2509 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2514 return rte_flow_error_set(error, ENOTSUP,
2515 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2517 "tunnel RSS is not supported");
2518 if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
2519 return rte_flow_error_set(error, ENOTSUP,
2520 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2522 "RSS hash key too small");
2523 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
2524 return rte_flow_error_set(error, ENOTSUP,
2525 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2527 "RSS hash key too large");
2528 if (rss->queue_num > priv->config.ind_table_max_size)
2529 return rte_flow_error_set(error, ENOTSUP,
2530 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2532 "number of queues too large");
2533 if (rss->types & MLX5_RSS_HF_MASK)
2534 return rte_flow_error_set(error, ENOTSUP,
2535 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2537 "some RSS protocols are not"
2539 for (i = 0; i != rss->queue_num; ++i) {
2540 if (!(*priv->rxqs)[rss->queue[i]])
2541 return rte_flow_error_set
2542 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2543 &rss->queue[i], "queue is not configured");
2549 * Validate the count action.
2552 * Pointer to the Ethernet device structure.
2554 * Pointer to error structure.
2557 * 0 on success, a negative errno value otherwise and rte_errno is set.
2560 mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
2561 struct rte_flow_error *error)
2563 struct priv *priv = dev->data->dev_private;
2565 if (!priv->config.flow_counter_en)
2566 return rte_flow_error_set(error, ENOTSUP,
2567 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2568 "flow counters are not supported.");
2573 * Verify the @p attributes will be correctly understood by the NIC and store
2574 * them in the @p flow if everything is correct.
2577 * Pointer to the Ethernet device structure.
2578 * @param[in] attributes
2579 * Pointer to flow attributes
2581 * Pointer to error structure.
2584 * 0 on success, a negative errno value otherwise and rte_errno is set.
2587 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
2588 const struct rte_flow_attr *attributes,
2589 struct rte_flow_error *error)
2591 struct priv *priv = dev->data->dev_private;
2592 uint32_t priority_max = priv->config.flow_prio - 1;
2594 if (attributes->group)
2595 return rte_flow_error_set(error, ENOTSUP,
2596 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2597 NULL, "groups are not supported");
2598 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2599 attributes->priority >= priority_max)
2600 return rte_flow_error_set(error, ENOTSUP,
2601 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2602 NULL, "priority out of range");
2603 if (attributes->egress)
2604 return rte_flow_error_set(error, ENOTSUP,
2605 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2606 "egress is not supported");
2607 if (attributes->transfer)
2608 return rte_flow_error_set(error, ENOTSUP,
2609 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2610 NULL, "transfer is not supported");
2611 if (!attributes->ingress)
2612 return rte_flow_error_set(error, EINVAL,
2613 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2615 "ingress attribute is mandatory");
2620 * Validate Ethernet item.
2623 * Item specification.
2624 * @param[in] item_flags
2625 * Bit-fields that hold the items detected until now.
2627 * Pointer to error structure.
2630 * 0 on success, a negative errno value otherwise and rte_errno is set.
2633 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
2634 uint64_t item_flags,
2635 struct rte_flow_error *error)
2637 const struct rte_flow_item_eth *mask = item->mask;
2638 const struct rte_flow_item_eth nic_mask = {
2639 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2640 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2641 .type = RTE_BE16(0xffff),
2644 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2646 if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
2647 return rte_flow_error_set(error, ENOTSUP,
2648 RTE_FLOW_ERROR_TYPE_ITEM, item,
2649 "3 levels of l2 are not supported");
2650 if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
2651 return rte_flow_error_set(error, ENOTSUP,
2652 RTE_FLOW_ERROR_TYPE_ITEM, item,
2653 "2 L2 without tunnel are not supported");
2655 mask = &rte_flow_item_eth_mask;
2656 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2657 (const uint8_t *)&nic_mask,
2658 sizeof(struct rte_flow_item_eth),
2664 * Validate VLAN item.
2667 * Item specification.
2668 * @param[in] item_flags
2669 * Bit-fields that holds the items detected until now.
2671 * Pointer to error structure.
2674 * 0 on success, a negative errno value otherwise and rte_errno is set.
2677 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2679 struct rte_flow_error *error)
2681 const struct rte_flow_item_vlan *spec = item->spec;
2682 const struct rte_flow_item_vlan *mask = item->mask;
2683 const struct rte_flow_item_vlan nic_mask = {
2684 .tci = RTE_BE16(0x0fff),
2685 .inner_type = RTE_BE16(0xffff),
2687 uint16_t vlan_tag = 0;
2688 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2690 const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2691 MLX5_FLOW_LAYER_INNER_L4) :
2692 (MLX5_FLOW_LAYER_OUTER_L3 |
2693 MLX5_FLOW_LAYER_OUTER_L4);
2694 const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2695 MLX5_FLOW_LAYER_OUTER_VLAN;
2697 if (item_flags & vlanm)
2698 return rte_flow_error_set(error, EINVAL,
2699 RTE_FLOW_ERROR_TYPE_ITEM, item,
2700 "VLAN layer already configured");
2701 else if ((item_flags & l34m) != 0)
2702 return rte_flow_error_set(error, EINVAL,
2703 RTE_FLOW_ERROR_TYPE_ITEM, item,
2704 "L2 layer cannot follow L3/L4 layer");
2706 mask = &rte_flow_item_vlan_mask;
2707 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2708 (const uint8_t *)&nic_mask,
2709 sizeof(struct rte_flow_item_vlan),
2714 vlan_tag = spec->tci;
2715 vlan_tag &= mask->tci;
2718 * From verbs perspective an empty VLAN is equivalent
2719 * to a packet without VLAN layer.
2722 return rte_flow_error_set(error, EINVAL,
2723 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2725 "VLAN cannot be empty");
2730 * Validate IPV4 item.
2733 * Item specification.
2734 * @param[in] item_flags
2735 * Bit-fields that holds the items detected until now.
2737 * Pointer to error structure.
2740 * 0 on success, a negative errno value otherwise and rte_errno is set.
2743 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2745 struct rte_flow_error *error)
2747 const struct rte_flow_item_ipv4 *mask = item->mask;
2748 const struct rte_flow_item_ipv4 nic_mask = {
2750 .src_addr = RTE_BE32(0xffffffff),
2751 .dst_addr = RTE_BE32(0xffffffff),
2752 .type_of_service = 0xff,
2753 .next_proto_id = 0xff,
2756 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2759 if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2760 MLX5_FLOW_LAYER_OUTER_L3))
2761 return rte_flow_error_set(error, ENOTSUP,
2762 RTE_FLOW_ERROR_TYPE_ITEM, item,
2763 "multiple L3 layers not supported");
2764 else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2765 MLX5_FLOW_LAYER_OUTER_L4))
2766 return rte_flow_error_set(error, EINVAL,
2767 RTE_FLOW_ERROR_TYPE_ITEM, item,
2768 "L3 cannot follow an L4 layer.");
2770 mask = &rte_flow_item_ipv4_mask;
2771 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2772 (const uint8_t *)&nic_mask,
2773 sizeof(struct rte_flow_item_ipv4),
2781 * Validate IPV6 item.
2784 * Item specification.
2785 * @param[in] item_flags
2786 * Bit-fields that hold the items detected until now.
2788 * Pointer to error structure.
2791 * 0 on success, a negative errno value otherwise and rte_errno is set.
2794 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2795 uint64_t item_flags,
2796 struct rte_flow_error *error)
2798 const struct rte_flow_item_ipv6 *mask = item->mask;
2799 const struct rte_flow_item_ipv6 nic_mask = {
2802 "\xff\xff\xff\xff\xff\xff\xff\xff"
2803 "\xff\xff\xff\xff\xff\xff\xff\xff",
2805 "\xff\xff\xff\xff\xff\xff\xff\xff"
2806 "\xff\xff\xff\xff\xff\xff\xff\xff",
2807 .vtc_flow = RTE_BE32(0xffffffff),
2812 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2815 if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2816 MLX5_FLOW_LAYER_OUTER_L3))
2817 return rte_flow_error_set(error, ENOTSUP,
2818 RTE_FLOW_ERROR_TYPE_ITEM, item,
2819 "multiple L3 layers not supported");
2820 else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2821 MLX5_FLOW_LAYER_OUTER_L4))
2822 return rte_flow_error_set(error, EINVAL,
2823 RTE_FLOW_ERROR_TYPE_ITEM, item,
2824 "L3 cannot follow an L4 layer.");
2826 * IPv6 is not recognised by the NIC inside a GRE tunnel.
2827 * Such support has to be disabled, as the rule would still be
2828 * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
2829 * Mellanox OFED 4.4-1.0.0.0.
2831 if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
2832 return rte_flow_error_set(error, ENOTSUP,
2833 RTE_FLOW_ERROR_TYPE_ITEM, item,
2834 "IPv6 inside a GRE tunnel is"
2835 " not recognised.");
2837 mask = &rte_flow_item_ipv6_mask;
2838 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2839 (const uint8_t *)&nic_mask,
2840 sizeof(struct rte_flow_item_ipv6),
2848 * Validate UDP item.
2851 * Item specification.
2852 * @param[in] item_flags
2853 * Bit-fields that hold the items detected until now.
2854 * @param[in] target_protocol
2855 * The next protocol in the previous item.
2857 * Pointer to error structure.
2860 * 0 on success, a negative errno value otherwise and rte_errno is set.
2863 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2864 uint64_t item_flags,
2865 uint8_t target_protocol,
2866 struct rte_flow_error *error)
2868 const struct rte_flow_item_udp *mask = item->mask;
2869 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2872 if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_UDP)
2873 return rte_flow_error_set(error, EINVAL,
2874 RTE_FLOW_ERROR_TYPE_ITEM, item,
2875 "protocol filtering not compatible"
2877 if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2878 MLX5_FLOW_LAYER_OUTER_L3)))
2879 return rte_flow_error_set(error, EINVAL,
2880 RTE_FLOW_ERROR_TYPE_ITEM, item,
2881 "L3 is mandatory to filter on L4");
2882 if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2883 MLX5_FLOW_LAYER_OUTER_L4))
2884 return rte_flow_error_set(error, EINVAL,
2885 RTE_FLOW_ERROR_TYPE_ITEM, item,
2886 "L4 layer is already present");
2888 mask = &rte_flow_item_udp_mask;
2889 ret = mlx5_flow_item_acceptable
2890 (item, (const uint8_t *)mask,
2891 (const uint8_t *)&rte_flow_item_udp_mask,
2892 sizeof(struct rte_flow_item_udp), error);
2899 * Validate TCP item.
2902 * Item specification.
2903 * @param[in] item_flags
2904 * Bit-fields that hold the items detected until now.
2905 * @param[in] target_protocol
2906 * The next protocol in the previous item.
2908 * Pointer to error structure.
2911 * 0 on success, a negative errno value otherwise and rte_errno is set.
2914 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2915 uint64_t item_flags,
2916 uint8_t target_protocol,
2917 struct rte_flow_error *error)
2919 const struct rte_flow_item_tcp *mask = item->mask;
2920 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2923 if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_TCP)
2924 return rte_flow_error_set(error, EINVAL,
2925 RTE_FLOW_ERROR_TYPE_ITEM, item,
2926 "protocol filtering not compatible"
2928 if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2929 MLX5_FLOW_LAYER_OUTER_L3)))
2930 return rte_flow_error_set(error, EINVAL,
2931 RTE_FLOW_ERROR_TYPE_ITEM, item,
2932 "L3 is mandatory to filter on L4");
2933 if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2934 MLX5_FLOW_LAYER_OUTER_L4))
2935 return rte_flow_error_set(error, EINVAL,
2936 RTE_FLOW_ERROR_TYPE_ITEM, item,
2937 "L4 layer is already present");
2939 mask = &rte_flow_item_tcp_mask;
2940 ret = mlx5_flow_item_acceptable
2941 (item, (const uint8_t *)mask,
2942 (const uint8_t *)&rte_flow_item_tcp_mask,
2943 sizeof(struct rte_flow_item_tcp), error);
2950 * Validate VXLAN item.
2953 * Item specification.
2954 * @param[in] item_flags
2955 * Bit-fields that hold the items detected until now.
2956 * @param[in] target_protocol
2957 * The next protocol in the previous item.
2959 * Pointer to error structure.
2962 * 0 on success, a negative errno value otherwise and rte_errno is set.
2965 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2966 uint64_t item_flags,
2967 struct rte_flow_error *error)
2969 const struct rte_flow_item_vxlan *spec = item->spec;
2970 const struct rte_flow_item_vxlan *mask = item->mask;
2975 } id = { .vlan_id = 0, };
2976 uint32_t vlan_id = 0;
2979 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2980 return rte_flow_error_set(error, ENOTSUP,
2981 RTE_FLOW_ERROR_TYPE_ITEM, item,
2982 "a tunnel is already present");
2984 * Verify only UDPv4 is present as defined in
2985 * https://tools.ietf.org/html/rfc7348
2987 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2988 return rte_flow_error_set(error, EINVAL,
2989 RTE_FLOW_ERROR_TYPE_ITEM, item,
2990 "no outer UDP layer found");
2992 mask = &rte_flow_item_vxlan_mask;
2993 ret = mlx5_flow_item_acceptable
2994 (item, (const uint8_t *)mask,
2995 (const uint8_t *)&rte_flow_item_vxlan_mask,
2996 sizeof(struct rte_flow_item_vxlan),
3001 memcpy(&id.vni[1], spec->vni, 3);
3002 vlan_id = id.vlan_id;
3003 memcpy(&id.vni[1], mask->vni, 3);
3004 vlan_id &= id.vlan_id;
3007 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
3008 * only this layer is defined in the Verbs specification, it is
3009 * interpreted as a wildcard and all packets will match this
3010 * rule; if it follows a full stack layer (e.g. eth / ipv4 /
3011 * udp), all packets matching the preceding layers will also
3012 * match this rule. To avoid such a situation, VNI 0 is
3013 * currently refused.
3016 return rte_flow_error_set(error, ENOTSUP,
3017 RTE_FLOW_ERROR_TYPE_ITEM, item,
3018 "VXLAN vni cannot be 0");
3019 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3020 return rte_flow_error_set(error, ENOTSUP,
3021 RTE_FLOW_ERROR_TYPE_ITEM, item,
3022 "VXLAN tunnel must be fully defined");
3027 * Validate VXLAN_GPE item.
3030 * Item specification.
3031 * @param[in] item_flags
3031 * Bit-fields that hold the items detected until now.
3034 * Pointer to the private data structure.
3035 * @param[in] target_protocol
3036 * The next protocol in the previous item.
3038 * Pointer to error structure.
3041 * 0 on success, a negative errno value otherwise and rte_errno is set.
3044 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3045 uint64_t item_flags,
3046 struct rte_eth_dev *dev,
3047 struct rte_flow_error *error)
3049 struct priv *priv = dev->data->dev_private;
3050 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
3051 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
3056 } id = { .vlan_id = 0, };
3057 uint32_t vlan_id = 0;
3059 if (!priv->config.l3_vxlan_en)
3060 return rte_flow_error_set(error, ENOTSUP,
3061 RTE_FLOW_ERROR_TYPE_ITEM, item,
3062 "L3 VXLAN is not enabled by device"
3063 " parameter and/or not configured in"
3065 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3066 return rte_flow_error_set(error, ENOTSUP,
3067 RTE_FLOW_ERROR_TYPE_ITEM, item,
3068 "a tunnel is already present");
3070 * Verify only UDPv4 is present as defined in
3071 * https://tools.ietf.org/html/rfc7348
3073 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
3074 return rte_flow_error_set(error, EINVAL,
3075 RTE_FLOW_ERROR_TYPE_ITEM, item,
3076 "no outer UDP layer found");
3078 mask = &rte_flow_item_vxlan_gpe_mask;
3079 ret = mlx5_flow_item_acceptable
3080 (item, (const uint8_t *)mask,
3081 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
3082 sizeof(struct rte_flow_item_vxlan_gpe),
3088 return rte_flow_error_set(error, ENOTSUP,
3089 RTE_FLOW_ERROR_TYPE_ITEM,
3091 "VxLAN-GPE protocol"
3093 memcpy(&id.vni[1], spec->vni, 3);
3094 vlan_id = id.vlan_id;
3095 memcpy(&id.vni[1], mask->vni, 3);
3096 vlan_id &= id.vlan_id;
3099 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
3100 * layer is defined in the Verbs specification, it is interpreted as a
3101 * wildcard and all packets will match this rule; if it follows a full
3102 * stack layer (e.g. eth / ipv4 / udp), all packets matching the
3103 * preceding layers will also match this rule. To avoid such a
3104 * situation, VNI 0 is currently refused.
3107 return rte_flow_error_set(error, ENOTSUP,
3108 RTE_FLOW_ERROR_TYPE_ITEM, item,
3109 "VXLAN-GPE vni cannot be 0");
3110 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3111 return rte_flow_error_set(error, ENOTSUP,
3112 RTE_FLOW_ERROR_TYPE_ITEM, item,
3113 "VXLAN-GPE tunnel must be fully"
3119 * Validate GRE item.
3122 * Item specification.
3123 * @param[in] item_flags
3124 * Bit flags to mark detected items.
3125 * @param[in] target_protocol
3126 * The next protocol in the previous item.
3128 * Pointer to error structure.
3131 * 0 on success, a negative errno value otherwise and rte_errno is set.
3134 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
3135 uint64_t item_flags,
3136 uint8_t target_protocol,
3137 struct rte_flow_error *error)
3139 const struct rte_flow_item_gre *spec __rte_unused = item->spec;
3140 const struct rte_flow_item_gre *mask = item->mask;
3143 if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_GRE)
3144 return rte_flow_error_set(error, EINVAL,
3145 RTE_FLOW_ERROR_TYPE_ITEM, item,
3146 "protocol filtering not compatible"
3147 " with this GRE layer");
3148 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3149 return rte_flow_error_set(error, ENOTSUP,
3150 RTE_FLOW_ERROR_TYPE_ITEM, item,
3151 "a tunnel is already present");
3152 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3153 return rte_flow_error_set(error, ENOTSUP,
3154 RTE_FLOW_ERROR_TYPE_ITEM, item,
3155 "L3 Layer is missing");
3157 mask = &rte_flow_item_gre_mask;
3158 ret = mlx5_flow_item_acceptable
3159 (item, (const uint8_t *)mask,
3160 (const uint8_t *)&rte_flow_item_gre_mask,
3161 sizeof(struct rte_flow_item_gre), error);
3164 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
3165 if (spec && (spec->protocol & mask->protocol))
3166 return rte_flow_error_set(error, ENOTSUP,
3167 RTE_FLOW_ERROR_TYPE_ITEM, item,
3168 "without MPLS support the"
3169 " specification cannot be used for"
3176 * Validate MPLS item.
3179 * Item specification.
3180 * @param[in] item_flags
3181 * Bit-fields that hold the items detected until now.
3182 * @param[in] target_protocol
3183 * The next protocol in the previous item.
3185 * Pointer to error structure.
3188 * 0 on success, a negative errno value otherwise and rte_errno is set.
3191 mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
3192 uint64_t item_flags __rte_unused,
3193 uint8_t target_protocol __rte_unused,
3194 struct rte_flow_error *error)
3196 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
3197 const struct rte_flow_item_mpls *mask = item->mask;
3200 if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_MPLS)
3201 return rte_flow_error_set(error, EINVAL,
3202 RTE_FLOW_ERROR_TYPE_ITEM, item,
3203 "protocol filtering not compatible"
3204 " with MPLS layer");
3205 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3206 return rte_flow_error_set(error, ENOTSUP,
3207 RTE_FLOW_ERROR_TYPE_ITEM, item,
3208 "a tunnel is already"
3211 mask = &rte_flow_item_mpls_mask;
3212 ret = mlx5_flow_item_acceptable
3213 (item, (const uint8_t *)mask,
3214 (const uint8_t *)&rte_flow_item_mpls_mask,
3215 sizeof(struct rte_flow_item_mpls), error);
3219 #endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
3220 return rte_flow_error_set(error, ENOTSUP,
3221 RTE_FLOW_ERROR_TYPE_ITEM, item,
3222 "MPLS is not supported by Verbs, please"
3228 * Internal validation function.
3231 * Pointer to the Ethernet device structure.
3233 * Pointer to the flow attributes.
3235 * Pointer to the list of items.
3236 * @param[in] actions
3237 * Pointer to the list of actions.
3239 * Pointer to the error structure.
3242 * 0 on success, a negative errno value otherwise and rte_errno is set.
3244 static int mlx5_flow_verbs_validate(struct rte_eth_dev *dev,
3245 const struct rte_flow_attr *attr,
3246 const struct rte_flow_item items[],
3247 const struct rte_flow_action actions[],
3248 struct rte_flow_error *error)
3251 uint32_t action_flags = 0;
3252 uint32_t item_flags = 0;
3254 uint8_t next_protocol = 0xff;
3258 ret = mlx5_flow_validate_attributes(dev, attr, error);
3261 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3263 switch (items->type) {
3264 case RTE_FLOW_ITEM_TYPE_VOID:
3266 case RTE_FLOW_ITEM_TYPE_ETH:
3267 ret = mlx5_flow_validate_item_eth(items, item_flags,
3271 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3272 MLX5_FLOW_LAYER_OUTER_L2;
3274 case RTE_FLOW_ITEM_TYPE_VLAN:
3275 ret = mlx5_flow_validate_item_vlan(items, item_flags,
3279 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3280 MLX5_FLOW_LAYER_OUTER_VLAN;
3282 case RTE_FLOW_ITEM_TYPE_IPV4:
3283 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3287 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3288 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3289 if (items->mask != NULL &&
3290 ((const struct rte_flow_item_ipv4 *)
3291 items->mask)->hdr.next_proto_id)
3293 ((const struct rte_flow_item_ipv4 *)
3294 (items->spec))->hdr.next_proto_id;
3296 case RTE_FLOW_ITEM_TYPE_IPV6:
3297 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3301 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3302 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3303 if (items->mask != NULL &&
3304 ((const struct rte_flow_item_ipv6 *)
3305 items->mask)->hdr.proto)
3307 ((const struct rte_flow_item_ipv6 *)
3308 items->spec)->hdr.proto;
3310 case RTE_FLOW_ITEM_TYPE_UDP:
3311 ret = mlx5_flow_validate_item_udp(items, item_flags,
3316 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3317 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3319 case RTE_FLOW_ITEM_TYPE_TCP:
3320 ret = mlx5_flow_validate_item_tcp(items, item_flags,
3321 next_protocol, error);
3324 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3325 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3327 case RTE_FLOW_ITEM_TYPE_VXLAN:
3328 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3332 item_flags |= MLX5_FLOW_LAYER_VXLAN;
3334 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3335 ret = mlx5_flow_validate_item_vxlan_gpe(items,
3340 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
3342 case RTE_FLOW_ITEM_TYPE_GRE:
3343 ret = mlx5_flow_validate_item_gre(items, item_flags,
3344 next_protocol, error);
3347 item_flags |= MLX5_FLOW_LAYER_GRE;
3349 case RTE_FLOW_ITEM_TYPE_MPLS:
3350 ret = mlx5_flow_validate_item_mpls(items, item_flags,
3355 if (next_protocol != 0xff &&
3356 next_protocol != MLX5_IP_PROTOCOL_MPLS)
3357 return rte_flow_error_set
3359 RTE_FLOW_ERROR_TYPE_ITEM, items,
3360 "protocol filtering not compatible"
3361 " with MPLS layer");
3362 item_flags |= MLX5_FLOW_LAYER_MPLS;
3365 return rte_flow_error_set(error, ENOTSUP,
3366 RTE_FLOW_ERROR_TYPE_ITEM,
3368 "item not supported");
3371 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3372 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3373 switch (actions->type) {
3374 case RTE_FLOW_ACTION_TYPE_VOID:
3376 case RTE_FLOW_ACTION_TYPE_FLAG:
3377 ret = mlx5_flow_validate_action_flag(action_flags,
3381 action_flags |= MLX5_FLOW_ACTION_FLAG;
3383 case RTE_FLOW_ACTION_TYPE_MARK:
3384 ret = mlx5_flow_validate_action_mark(actions,
3389 action_flags |= MLX5_FLOW_ACTION_MARK;
3391 case RTE_FLOW_ACTION_TYPE_DROP:
3392 ret = mlx5_flow_validate_action_drop(action_flags,
3396 action_flags |= MLX5_FLOW_ACTION_DROP;
3398 case RTE_FLOW_ACTION_TYPE_QUEUE:
3399 ret = mlx5_flow_validate_action_queue(actions,
3404 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3406 case RTE_FLOW_ACTION_TYPE_RSS:
3407 ret = mlx5_flow_validate_action_rss(actions,
3412 action_flags |= MLX5_FLOW_ACTION_RSS;
3414 case RTE_FLOW_ACTION_TYPE_COUNT:
3415 ret = mlx5_flow_validate_action_count(dev, error);
3418 action_flags |= MLX5_FLOW_ACTION_COUNT;
3421 return rte_flow_error_set(error, ENOTSUP,
3422 RTE_FLOW_ERROR_TYPE_ACTION,
3424 "action not supported");
3431 * Validate a flow supported by the NIC.
3433 * @see rte_flow_validate()
3437 mlx5_flow_validate(struct rte_eth_dev *dev,
3438 const struct rte_flow_attr *attr,
3439 const struct rte_flow_item items[],
3440 const struct rte_flow_action actions[],
3441 struct rte_flow_error *error)
3445 ret = mlx5_flow_verbs_validate(dev, attr, items, actions, error);
3455 * Pointer to the Ethernet device structure.
3456 * @param[in, out] flow
3457 * Pointer to flow structure.
3460 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3462 struct priv *priv = dev->data->dev_private;
3463 struct mlx5_flow_verbs *verbs;
3465 if (flow->nl_flow && priv->mnl_socket)
3466 mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
3467 LIST_FOREACH(verbs, &flow->verbs, next) {
3469 claim_zero(mlx5_glue->destroy_flow(verbs->flow));
3473 if (flow->fate & MLX5_FLOW_FATE_DROP)
3474 mlx5_hrxq_drop_release(dev);
3476 mlx5_hrxq_release(dev, verbs->hrxq);
3480 if (flow->counter) {
3481 mlx5_flow_counter_release(flow->counter);
3482 flow->counter = NULL;
3490 * Pointer to the Ethernet device structure.
3491 * @param[in, out] flow
3492 * Pointer to flow structure.
3494 * Pointer to error structure.
3497 * 0 on success, a negative errno value otherwise and rte_errno is set.
3500 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3501 struct rte_flow_error *error)
3503 struct priv *priv = dev->data->dev_private;
3504 struct mlx5_flow_verbs *verbs;
3507 LIST_FOREACH(verbs, &flow->verbs, next) {
3508 if (flow->fate & MLX5_FLOW_FATE_DROP) {
3509 verbs->hrxq = mlx5_hrxq_drop_new(dev);
3513 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3515 "cannot get drop hash queue");
3519 struct mlx5_hrxq *hrxq;
3521 hrxq = mlx5_hrxq_get(dev, flow->key,
3522 MLX5_RSS_HASH_KEY_LEN,
3525 flow->rss.queue_num);
3527 hrxq = mlx5_hrxq_new(dev, flow->key,
3528 MLX5_RSS_HASH_KEY_LEN,
3531 flow->rss.queue_num,
3533 MLX5_FLOW_LAYER_TUNNEL));
3537 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3539 "cannot get hash queue");
3545 mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
3547 rte_flow_error_set(error, errno,
3548 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3550 "hardware refuses to create flow");
3554 if (flow->nl_flow &&
3556 mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
3560 err = rte_errno; /* Save rte_errno before cleanup. */
3561 LIST_FOREACH(verbs, &flow->verbs, next) {
3563 if (flow->fate & MLX5_FLOW_FATE_DROP)
3564 mlx5_hrxq_drop_release(dev);
3566 mlx5_hrxq_release(dev, verbs->hrxq);
3570 rte_errno = err; /* Restore rte_errno. */
3575 * Create a flow and add it to @p list.
3578 * Pointer to Ethernet device.
3580 * Pointer to a TAILQ flow list.
3582 * Flow rule attributes.
3584 * Pattern specification (list terminated by the END pattern item).
3585 * @param[in] actions
3586 * Associated actions (list terminated by the END action).
3588 * Perform verbose error reporting if not NULL.
3591 * A flow on success, NULL otherwise and rte_errno is set.
3593 static struct rte_flow *
3594 mlx5_flow_list_create(struct rte_eth_dev *dev,
3595 struct mlx5_flows *list,
3596 const struct rte_flow_attr *attr,
3597 const struct rte_flow_item items[],
3598 const struct rte_flow_action actions[],
3599 struct rte_flow_error *error)
3601 struct rte_flow *flow = NULL;
3605 ret = mlx5_flow_validate(dev, attr, items, actions, error);
3608 ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
3612 flow = rte_calloc(__func__, 1, size, 0);
3614 rte_flow_error_set(error, ENOMEM,
3615 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3617 "not enough memory to create flow");
3620 ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
3625 assert((size_t)ret == size);
3626 if (dev->data->dev_started) {
3627 ret = mlx5_flow_apply(dev, flow, error);
3629 ret = rte_errno; /* Save rte_errno before cleanup. */
3631 mlx5_flow_remove(dev, flow);
3634 rte_errno = ret; /* Restore rte_errno. */
3638 TAILQ_INSERT_TAIL(list, flow, next);
3639 mlx5_flow_rxq_flags_set(dev, flow);
3646 * @see rte_flow_create()
3650 mlx5_flow_create(struct rte_eth_dev *dev,
3651 const struct rte_flow_attr *attr,
3652 const struct rte_flow_item items[],
3653 const struct rte_flow_action actions[],
3654 struct rte_flow_error *error)
3656 return mlx5_flow_list_create
3657 (dev, &((struct priv *)dev->data->dev_private)->flows,
3658 attr, items, actions, error);
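/*
 * Usage sketch (assumption, application side): this entry point is
 * normally reached through the generic rte_flow API.
 *
 *    struct rte_flow_error err;
 *    struct rte_flow *f = rte_flow_create(port_id, &attr, items,
 *                                         actions, &err);
 *    if (!f)
 *        printf("flow creation failed: %s\n",
 *               err.message ? err.message : "(no message)");
 */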
3662 * Destroy a flow in a list.
3665 * Pointer to Ethernet device.
3667 * Pointer to a TAILQ flow list.
3672 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
3673 struct rte_flow *flow)
3675 mlx5_flow_remove(dev, flow);
3676 TAILQ_REMOVE(list, flow, next);
3678 * Update RX queue flags only if port is started, otherwise it is already clean.
3681 if (dev->data->dev_started)
3682 mlx5_flow_rxq_flags_trim(dev, flow);
3687 * Destroy all flows.
3690 * Pointer to Ethernet device.
3692 * Pointer to a TAILQ flow list.
3695 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
3697 while (!TAILQ_EMPTY(list)) {
3698 struct rte_flow *flow;
3700 flow = TAILQ_FIRST(list);
3701 mlx5_flow_list_destroy(dev, list, flow);
3709 * Pointer to Ethernet device.
3711 * Pointer to a TAILQ flow list.
3714 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
3716 struct rte_flow *flow;
3718 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
3719 mlx5_flow_remove(dev, flow);
3720 mlx5_flow_rxq_flags_clear(dev);
3727 * Pointer to Ethernet device.
3729 * Pointer to a TAILQ flow list.
3732 * 0 on success, a negative errno value otherwise and rte_errno is set.
3735 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
3737 struct rte_flow *flow;
3738 struct rte_flow_error error;
3741 TAILQ_FOREACH(flow, list, next) {
3742 ret = mlx5_flow_apply(dev, flow, &error);
3745 mlx5_flow_rxq_flags_set(dev, flow);
3749 ret = rte_errno; /* Save rte_errno before cleanup. */
3750 mlx5_flow_stop(dev, list);
3751 rte_errno = ret; /* Restore rte_errno. */
3756 * Verify the flow list is empty.
3759 * Pointer to Ethernet device.
3761 * @return the number of flows not released.
3764 mlx5_flow_verify(struct rte_eth_dev *dev)
3766 struct priv *priv = dev->data->dev_private;
3767 struct rte_flow *flow;
3770 TAILQ_FOREACH(flow, &priv->flows, next) {
3771 DRV_LOG(DEBUG, "port %u flow %p still referenced",
3772 dev->data->port_id, (void *)flow);
3779 * Enable a control flow configured from the control plane.
3782 * Pointer to Ethernet device.
3784 * An Ethernet flow spec to apply.
3786 * An Ethernet flow mask to apply.
3788 * A VLAN flow spec to apply.
3790 * A VLAN flow mask to apply.
3793 * 0 on success, a negative errno value otherwise and rte_errno is set.
3796 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
3797 struct rte_flow_item_eth *eth_spec,
3798 struct rte_flow_item_eth *eth_mask,
3799 struct rte_flow_item_vlan *vlan_spec,
3800 struct rte_flow_item_vlan *vlan_mask)
3802 struct priv *priv = dev->data->dev_private;
3803 const struct rte_flow_attr attr = {
3805 .priority = MLX5_FLOW_PRIO_RSVD,
3807 struct rte_flow_item items[] = {
3809 .type = RTE_FLOW_ITEM_TYPE_ETH,
3815 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
3816 RTE_FLOW_ITEM_TYPE_END,
3822 .type = RTE_FLOW_ITEM_TYPE_END,
3825 uint16_t queue[priv->reta_idx_n];
3826 struct rte_flow_action_rss action_rss = {
3827 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3829 .types = priv->rss_conf.rss_hf,
3830 .key_len = priv->rss_conf.rss_key_len,
3831 .queue_num = priv->reta_idx_n,
3832 .key = priv->rss_conf.rss_key,
3835 struct rte_flow_action actions[] = {
3837 .type = RTE_FLOW_ACTION_TYPE_RSS,
3838 .conf = &action_rss,
3841 .type = RTE_FLOW_ACTION_TYPE_END,
3844 struct rte_flow *flow;
3845 struct rte_flow_error error;
3848 if (!priv->reta_idx_n) {
3852 for (i = 0; i != priv->reta_idx_n; ++i)
3853 queue[i] = (*priv->reta_idx)[i];
3854 flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
3862 * Enable a control flow configured from the control plane.
3865 * Pointer to Ethernet device.
3867 * An Ethernet flow spec to apply.
3869 * An Ethernet flow mask to apply.
3872 * 0 on success, a negative errno value otherwise and rte_errno is set.
3875 mlx5_ctrl_flow(struct rte_eth_dev *dev,
3876 struct rte_flow_item_eth *eth_spec,
3877 struct rte_flow_item_eth *eth_mask)
3879 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
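/*
 * Illustrative call (assumption): accept broadcast traffic by
 * matching on the destination MAC address only, using the same
 * structure as both spec and mask.
 *
 *    struct rte_flow_item_eth bcast = {
 *        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *    };
 *
 *    mlx5_ctrl_flow(dev, &bcast, &bcast);
 */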
3885 * @see rte_flow_destroy()
3889 mlx5_flow_destroy(struct rte_eth_dev *dev,
3890 struct rte_flow *flow,
3891 struct rte_flow_error *error __rte_unused)
3893 struct priv *priv = dev->data->dev_private;
3895 mlx5_flow_list_destroy(dev, &priv->flows, flow);
3900 * Destroy all flows.
3902 * @see rte_flow_flush()
3906 mlx5_flow_flush(struct rte_eth_dev *dev,
3907 struct rte_flow_error *error __rte_unused)
3909 struct priv *priv = dev->data->dev_private;
3911 mlx5_flow_list_flush(dev, &priv->flows);
3918 * @see rte_flow_isolate()
3922 mlx5_flow_isolate(struct rte_eth_dev *dev,
3924 struct rte_flow_error *error)
3926 struct priv *priv = dev->data->dev_private;
3928 if (dev->data->dev_started) {
3929 rte_flow_error_set(error, EBUSY,
3930 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3932 "port must be stopped first");
3935 priv->isolated = !!enable;
3937 dev->dev_ops = &mlx5_dev_ops_isolate;
3939 dev->dev_ops = &mlx5_dev_ops;
3944 * Query flow counter.
3947 * Pointer to the flow.
3950 * 0 on success, a negative errno value otherwise and rte_errno is set.
3953 mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
3954 void *data __rte_unused,
3955 struct rte_flow_error *error)
3957 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
3958 if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
3959 struct rte_flow_query_count *qc = data;
3960 uint64_t counters[2] = {0, 0};
3961 struct ibv_query_counter_set_attr query_cs_attr = {
3962 .cs = flow->counter->cs,
3963 .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
3965 struct ibv_counter_set_data query_out = {
3967 .outlen = 2 * sizeof(uint64_t),
3969 int err = mlx5_glue->query_counter_set(&query_cs_attr,
3973 return rte_flow_error_set
3975 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3977 "cannot read counter");
3980 qc->hits = counters[0] - flow->counter->hits;
3981 qc->bytes = counters[1] - flow->counter->bytes;
3983 flow->counter->hits = counters[0];
3984 flow->counter->bytes = counters[1];
3988 return rte_flow_error_set(error, ENOTSUP,
3989 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3991 "flow does not have counter");
3993 return rte_flow_error_set(error, ENOTSUP,
3994 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3996 "counters are not available");
4002 * @see rte_flow_query()
4006 mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
4007 struct rte_flow *flow,
4008 const struct rte_flow_action *actions,
4010 struct rte_flow_error *error)
4014 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4015 switch (actions->type) {
4016 case RTE_FLOW_ACTION_TYPE_VOID:
4018 case RTE_FLOW_ACTION_TYPE_COUNT:
4019 ret = mlx5_flow_query_count(flow, data, error);
4022 return rte_flow_error_set(error, ENOTSUP,
4023 RTE_FLOW_ERROR_TYPE_ACTION,
4025 "action not supported");
4034 * Convert a flow director filter to a generic flow.
4037 * Pointer to Ethernet device.
4038 * @param fdir_filter
4039 * Flow director filter to add.
4041 * Generic flow parameters structure.
4044 * 0 on success, a negative errno value otherwise and rte_errno is set.
4047 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
4048 const struct rte_eth_fdir_filter *fdir_filter,
4049 struct mlx5_fdir *attributes)
4051 struct priv *priv = dev->data->dev_private;
4052 const struct rte_eth_fdir_input *input = &fdir_filter->input;
4053 const struct rte_eth_fdir_masks *mask =
4054 &dev->data->dev_conf.fdir_conf.mask;
4056 /* Validate queue number. */
4057 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
4058 DRV_LOG(ERR, "port %u invalid queue number %d",
4059 dev->data->port_id, fdir_filter->action.rx_queue);
4063 attributes->attr.ingress = 1;
4064 attributes->items[0] = (struct rte_flow_item) {
4065 .type = RTE_FLOW_ITEM_TYPE_ETH,
4066 .spec = &attributes->l2,
4067 .mask = &attributes->l2_mask,
4069 switch (fdir_filter->action.behavior) {
4070 case RTE_ETH_FDIR_ACCEPT:
4071 attributes->actions[0] = (struct rte_flow_action){
4072 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
4073 .conf = &attributes->queue,
4076 case RTE_ETH_FDIR_REJECT:
4077 attributes->actions[0] = (struct rte_flow_action){
4078 .type = RTE_FLOW_ACTION_TYPE_DROP,
4082 DRV_LOG(ERR, "port %u invalid behavior %d",
4084 fdir_filter->action.behavior);
4085 rte_errno = ENOTSUP;
4088 attributes->queue.index = fdir_filter->action.rx_queue;
4090 switch (fdir_filter->input.flow_type) {
4091 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4092 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4093 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4094 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
4095 .src_addr = input->flow.ip4_flow.src_ip,
4096 .dst_addr = input->flow.ip4_flow.dst_ip,
4097 .time_to_live = input->flow.ip4_flow.ttl,
4098 .type_of_service = input->flow.ip4_flow.tos,
4099 .next_proto_id = input->flow.ip4_flow.proto,
4101 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
4102 .src_addr = mask->ipv4_mask.src_ip,
4103 .dst_addr = mask->ipv4_mask.dst_ip,
4104 .time_to_live = mask->ipv4_mask.ttl,
4105 .type_of_service = mask->ipv4_mask.tos,
4106 .next_proto_id = mask->ipv4_mask.proto,
4108 attributes->items[1] = (struct rte_flow_item){
4109 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4110 .spec = &attributes->l3,
4111 .mask = &attributes->l3_mask,
4114 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4115 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4116 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4117 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
4118 .hop_limits = input->flow.ipv6_flow.hop_limits,
4119 .proto = input->flow.ipv6_flow.proto,
4122 memcpy(attributes->l3.ipv6.hdr.src_addr,
4123 input->flow.ipv6_flow.src_ip,
4124 RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
4125 memcpy(attributes->l3.ipv6.hdr.dst_addr,
4126 input->flow.ipv6_flow.dst_ip,
4127 RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
4128 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
4129 mask->ipv6_mask.src_ip,
4130 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
4131 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
4132 mask->ipv6_mask.dst_ip,
4133 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
4134 attributes->items[1] = (struct rte_flow_item){
4135 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4136 .spec = &attributes->l3,
4137 .mask = &attributes->l3_mask,
4141 DRV_LOG(ERR, "port %u invalid flow type %d",
4142 dev->data->port_id, fdir_filter->input.flow_type);
4143 rte_errno = ENOTSUP;
4147 switch (fdir_filter->input.flow_type) {
4148 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4149 attributes->l4.udp.hdr = (struct udp_hdr){
4150 .src_port = input->flow.udp4_flow.src_port,
4151 .dst_port = input->flow.udp4_flow.dst_port,
4153 attributes->l4_mask.udp.hdr = (struct udp_hdr){
4154 .src_port = mask->src_port_mask,
4155 .dst_port = mask->dst_port_mask,
4157 attributes->items[2] = (struct rte_flow_item){
4158 .type = RTE_FLOW_ITEM_TYPE_UDP,
4159 .spec = &attributes->l4,
4160 .mask = &attributes->l4_mask,
4163 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4164 attributes->l4.tcp.hdr = (struct tcp_hdr){
4165 .src_port = input->flow.tcp4_flow.src_port,
4166 .dst_port = input->flow.tcp4_flow.dst_port,
4168 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
4169 .src_port = mask->src_port_mask,
4170 .dst_port = mask->dst_port_mask,
4172 attributes->items[2] = (struct rte_flow_item){
4173 .type = RTE_FLOW_ITEM_TYPE_TCP,
4174 .spec = &attributes->l4,
4175 .mask = &attributes->l4_mask,
4178 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4179 attributes->l4.udp.hdr = (struct udp_hdr){
4180 .src_port = input->flow.udp6_flow.src_port,
4181 .dst_port = input->flow.udp6_flow.dst_port,
4183 attributes->l4_mask.udp.hdr = (struct udp_hdr){
4184 .src_port = mask->src_port_mask,
4185 .dst_port = mask->dst_port_mask,
4187 attributes->items[2] = (struct rte_flow_item){
4188 .type = RTE_FLOW_ITEM_TYPE_UDP,
4189 .spec = &attributes->l4,
4190 .mask = &attributes->l4_mask,
4193 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4194 attributes->l4.tcp.hdr = (struct tcp_hdr){
4195 .src_port = input->flow.tcp6_flow.src_port,
4196 .dst_port = input->flow.tcp6_flow.dst_port,
4198 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
4199 .src_port = mask->src_port_mask,
4200 .dst_port = mask->dst_port_mask,
4202 attributes->items[2] = (struct rte_flow_item){
4203 .type = RTE_FLOW_ITEM_TYPE_TCP,
4204 .spec = &attributes->l4,
4205 .mask = &attributes->l4_mask,
4208 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4209 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4212 DRV_LOG(ERR, "port %u invalid flow type %d",
4213 dev->data->port_id, fdir_filter->input.flow_type);
4214 rte_errno = ENOTSUP;
4221 * Add new flow director filter and store it in list.
4224 * Pointer to Ethernet device.
4225 * @param fdir_filter
4226 * Flow director filter to add.
4229 * 0 on success, a negative errno value otherwise and rte_errno is set.
4232 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
4233 const struct rte_eth_fdir_filter *fdir_filter)
4235 struct priv *priv = dev->data->dev_private;
4236 struct mlx5_fdir attributes = {
4239 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
4240 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
4244 struct rte_flow_error error;
4245 struct rte_flow *flow;
4248 ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
4251 flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
4252 attributes.items, attributes.actions,
4255 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
4263 * Delete specific filter.
4266 * Pointer to Ethernet device.
4267 * @param fdir_filter
4268 * Filter to be deleted.
4271 * 0 on success, a negative errno value otherwise and rte_errno is set.
4274 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
4275 const struct rte_eth_fdir_filter *fdir_filter
4278 rte_errno = ENOTSUP;
4283 * Update queue for specific filter.
4286 * Pointer to Ethernet device.
4287 * @param fdir_filter
4288 * Filter to be updated.
4291 * 0 on success, a negative errno value otherwise and rte_errno is set.
4294 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
4295 const struct rte_eth_fdir_filter *fdir_filter)
4299 ret = mlx5_fdir_filter_delete(dev, fdir_filter);
4302 return mlx5_fdir_filter_add(dev, fdir_filter);
4306 * Flush all filters.
4309 * Pointer to Ethernet device.
4312 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
4314 struct priv *priv = dev->data->dev_private;
4316 mlx5_flow_list_flush(dev, &priv->flows);
4320 * Get flow director information.
4323 * Pointer to Ethernet device.
4324 * @param[out] fdir_info
4325 * Resulting flow director information.
4328 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
4330 struct rte_eth_fdir_masks *mask =
4331 &dev->data->dev_conf.fdir_conf.mask;
4333 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
4334 fdir_info->guarant_spc = 0;
4335 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
4336 fdir_info->max_flexpayload = 0;
4337 fdir_info->flow_types_mask[0] = 0;
4338 fdir_info->flex_payload_unit = 0;
4339 fdir_info->max_flex_payload_segment_num = 0;
4340 fdir_info->flex_payload_limit = 0;
4341 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
4345 * Deal with flow director operations.
4348 * Pointer to Ethernet device.
4350 * Operation to perform.
4352 * Pointer to operation-specific structure.
4355 * 0 on success, a negative errno value otherwise and rte_errno is set.
4358 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4361 enum rte_fdir_mode fdir_mode =
4362 dev->data->dev_conf.fdir_conf.mode;
4364 if (filter_op == RTE_ETH_FILTER_NOP)
4366 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
4367 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4368 DRV_LOG(ERR, "port %u flow director mode %d not supported",
4369 dev->data->port_id, fdir_mode);
4373 switch (filter_op) {
4374 case RTE_ETH_FILTER_ADD:
4375 return mlx5_fdir_filter_add(dev, arg);
4376 case RTE_ETH_FILTER_UPDATE:
4377 return mlx5_fdir_filter_update(dev, arg);
4378 case RTE_ETH_FILTER_DELETE:
4379 return mlx5_fdir_filter_delete(dev, arg);
4380 case RTE_ETH_FILTER_FLUSH:
4381 mlx5_fdir_filter_flush(dev);
4383 case RTE_ETH_FILTER_INFO:
4384 mlx5_fdir_info_get(dev, arg);
4387 DRV_LOG(DEBUG, "port %u unknown operation %u",
4388 dev->data->port_id, filter_op);
4396 * Manage filter operations.
4399 * Pointer to Ethernet device structure.
4400 * @param filter_type
4401 * Filter type.
4402 * @param filter_op
4403 * Operation to perform.
4405 * Pointer to operation-specific structure.
4408 * 0 on success, a negative errno value otherwise and rte_errno is set.
4411 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
4412 enum rte_filter_type filter_type,
4413 enum rte_filter_op filter_op,
4416 switch (filter_type) {
4417 case RTE_ETH_FILTER_GENERIC:
4418 if (filter_op != RTE_ETH_FILTER_GET) {
4422 *(const void **)arg = &mlx5_flow_ops;
4424 case RTE_ETH_FILTER_FDIR:
4425 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
4427 DRV_LOG(ERR, "port %u filter type (%d) not supported",
4428 dev->data->port_id, filter_type);
4429 rte_errno = ENOTSUP;
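/*
 * Usage sketch (assumption, application side): the generic flow ops
 * exposed above are usually retrieved as follows.
 *
 *    const struct rte_flow_ops *ops = NULL;
 *    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                            RTE_ETH_FILTER_GET, &ops);
 */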