1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
5 #include <netinet/in.h>
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
40 * Pointer to the Ethernet device structure.
42 * Indicate if this counter is shared with other flows.
47 * A pointer to the counter, NULL otherwise and rte_errno is set.
/* Get (reuse) or create a flow counter; returns NULL with rte_errno set on
 * failure. Shared counters with a matching id are looked up first. */
49 static struct mlx5_flow_counter *
50 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
52 struct priv *priv = dev->data->dev_private;
53 struct mlx5_flow_counter *cnt;
/* Try to reuse an already allocated shared counter with the same id. */
55 LIST_FOREACH(cnt, &priv->flow_counters, next) {
56 if (!cnt->shared || cnt->shared != shared)
/* Hardware counter sets are only available with this Verbs extension. */
63 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_V42
65 struct mlx5_flow_counter tmpl = {
68 .cs = mlx5_glue->create_counter_set
70 &(struct ibv_counter_set_init_attr){
/* Allocate the PMD-side counter object mirroring tmpl. */
82 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
/* On allocation failure the Verbs counter set must be released. */
84 claim_zero(mlx5_glue->destroy_counter_set(tmpl.cs));
/* Register the new counter so later flows can share it. */
89 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
97 * Release a flow counter.
100 * Pointer to the counter handler.
/* Drop one reference on @p counter; destroy and unlink it when the
 * reference count reaches zero. */
103 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
105 if (--counter->ref_cnt == 0) {
106 claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
107 LIST_REMOVE(counter, next);
113 * Add a verbs item specification into @p flow.
115 * @param[in, out] flow
116 * Pointer to flow structure.
118 * Create specification.
120 * Size in bytes of the specification to copy.
/* Append a Verbs specification of @p size bytes to the flow's spec buffer.
 * Assumes the caller pre-sized the buffer; no bounds check is done here. */
123 flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
125 struct mlx5_flow_verbs *verbs = &flow->verbs;
/* Copy the spec just past the last one already stored. */
130 dst = (void *)(verbs->specs + verbs->size);
131 memcpy(dst, src, size);
/* Keep the Verbs attribute's spec count in sync with the buffer. */
132 ++verbs->attr->num_of_specs;
138 * Convert the @p item into a Verbs specification. This function assumes that
139 * the input is valid and that there is space to insert the requested item
143 * Item specification.
144 * @param[in] item_flags
145 * Bit field with all detected items.
146 * @param[in, out] dev_flow
147 * Pointer to dev_flow structure.
150 flow_verbs_translate_item_eth(const struct rte_flow_item *item,
151 uint64_t *item_flags,
152 struct mlx5_flow *dev_flow)
154 const struct rte_flow_item_eth *spec = item->spec;
155 const struct rte_flow_item_eth *mask = item->mask;
/* Inner (tunneled) L2 if any tunnel layer was already matched. */
156 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
157 const unsigned int size = sizeof(struct ibv_flow_spec_eth);
158 struct ibv_flow_spec_eth eth = {
159 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow Ethernet mask when none is given. */
164 mask = &rte_flow_item_eth_mask;
168 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
169 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
170 eth.val.ether_type = spec->type;
171 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
172 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
173 eth.mask.ether_type = mask->type;
174 /* Remove unwanted bits from values. */
175 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
176 eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
177 eth.val.src_mac[i] &= eth.mask.src_mac[i];
179 eth.val.ether_type &= eth.mask.ether_type;
/* An L2 match sets the lowest-priority map level. */
180 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
182 flow_verbs_spec_add(dev_flow, &eth, size);
/* Record the detected L2 layer (inner vs. outer) for later items. */
183 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
184 MLX5_FLOW_LAYER_OUTER_L2;
188 * Update the VLAN tag in the Verbs Ethernet specification.
189 * This function assumes that the input is valid and there is space to add
190 * the requested item.
192 * @param[in, out] attr
193 * Pointer to Verbs attributes structure.
195 * Verbs structure containing the VLAN information to copy.
198 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
199 struct ibv_flow_spec_eth *eth)
/* Look for an existing Ethernet spec of the same type (inner/outer). */
202 const enum ibv_flow_spec_type search = eth->type;
/* Specs are laid out back-to-back immediately after the attribute. */
203 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
204 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
206 for (i = 0; i != attr->num_of_specs; ++i) {
207 if (hdr->type == search) {
208 struct ibv_flow_spec_eth *e =
209 (struct ibv_flow_spec_eth *)hdr;
/* Merge VLAN tag and ether type into the matched eth spec. */
211 e->val.vlan_tag = eth->val.vlan_tag;
212 e->mask.vlan_tag = eth->mask.vlan_tag;
213 e->val.ether_type = eth->val.ether_type;
214 e->mask.ether_type = eth->mask.ether_type;
/* Advance by the stored spec's own size (specs vary in length). */
217 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
222 * Convert the @p item into a Verbs specification. This function assumes that
223 * the input is valid and that there is space to insert the requested item
227 * Item specification.
228 * @param[in, out] item_flags
229 * Bit mask that holds all detected items.
230 * @param[in, out] dev_flow
231 * Pointer to dev_flow structure.
234 flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
235 uint64_t *item_flags,
236 struct mlx5_flow *dev_flow)
238 const struct rte_flow_item_vlan *spec = item->spec;
239 const struct rte_flow_item_vlan *mask = item->mask;
240 unsigned int size = sizeof(struct ibv_flow_spec_eth);
241 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
/* VLAN is expressed inside a Verbs Ethernet spec, not a separate one. */
242 struct ibv_flow_spec_eth eth = {
243 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* L2 layer bit to test whether an eth spec was already emitted. */
246 const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
247 MLX5_FLOW_LAYER_OUTER_L2;
250 mask = &rte_flow_item_vlan_mask;
252 eth.val.vlan_tag = spec->tci;
253 eth.mask.vlan_tag = mask->tci;
254 eth.val.vlan_tag &= eth.mask.vlan_tag;
255 eth.val.ether_type = spec->inner_type;
256 eth.mask.ether_type = mask->inner_type;
257 eth.val.ether_type &= eth.mask.ether_type;
/* No ETH item seen yet: add a fresh eth spec carrying the VLAN info;
 * otherwise patch the VLAN fields into the existing eth spec. */
259 if (!(*item_flags & l2m)) {
260 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
261 flow_verbs_spec_add(dev_flow, &eth, size);
263 flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
264 size = 0; /* Only an update is done in eth specification. */
266 *item_flags |= tunnel ?
267 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
268 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
272 * Convert the @p item into a Verbs specification. This function assumes that
273 * the input is valid and that there is space to insert the requested item
277 * Item specification.
278 * @param[in, out] item_flags
279 * Bit mask that marks all detected items.
280 * @param[in, out] dev_flow
281 * Pointer to specific flow structure.
284 flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
285 uint64_t *item_flags,
286 struct mlx5_flow *dev_flow)
288 const struct rte_flow_item_ipv4 *spec = item->spec;
289 const struct rte_flow_item_ipv4 *mask = item->mask;
290 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
291 unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
/* "ext" variant also matches proto and TOS, not just addresses. */
292 struct ibv_flow_spec_ipv4_ext ipv4 = {
293 .type = IBV_FLOW_SPEC_IPV4_EXT |
294 (tunnel ? IBV_FLOW_SPEC_INNER : 0),
299 mask = &rte_flow_item_ipv4_mask;
/* Record the L3 layer before building the spec. */
300 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
301 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
303 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
304 .src_ip = spec->hdr.src_addr,
305 .dst_ip = spec->hdr.dst_addr,
306 .proto = spec->hdr.next_proto_id,
307 .tos = spec->hdr.type_of_service,
309 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
310 .src_ip = mask->hdr.src_addr,
311 .dst_ip = mask->hdr.dst_addr,
312 .proto = mask->hdr.next_proto_id,
313 .tos = mask->hdr.type_of_service,
315 /* Remove unwanted bits from values. */
316 ipv4.val.src_ip &= ipv4.mask.src_ip;
317 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
318 ipv4.val.proto &= ipv4.mask.proto;
319 ipv4.val.tos &= ipv4.mask.tos;
/* Enable IPv4 RSS hashing for this flow where applicable. */
321 dev_flow->verbs.hash_fields |=
322 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
323 MLX5_IPV4_LAYER_TYPES,
324 MLX5_IPV4_IBV_RX_HASH);
325 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
326 flow_verbs_spec_add(dev_flow, &ipv4, size);
330 * Convert the @p item into a Verbs specification. This function assumes that
331 * the input is valid and that there is space to insert the requested item
335 * Item specification.
336 * @param[in, out] item_flags
337 * Bit mask that marks all detected items.
338 * @param[in, out] dev_flow
339 * Pointer to specific flow structure.
342 flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
343 uint64_t *item_flags,
344 struct mlx5_flow *dev_flow)
346 const struct rte_flow_item_ipv6 *spec = item->spec;
347 const struct rte_flow_item_ipv6 *mask = item->mask;
/* NOTE(review): tunnel is derived from dev_flow->flow->layers here,
 * while the sibling translators (eth/vlan/ipv4/udp) use *item_flags —
 * confirm this divergence is intentional. */
348 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
349 unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
350 struct ibv_flow_spec_ipv6 ipv6 = {
351 .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
356 mask = &rte_flow_item_ipv6_mask;
357 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
358 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* Host-order copies of the version/TC/flow-label word for masking. */
361 uint32_t vtc_flow_val;
362 uint32_t vtc_flow_mask;
364 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
365 RTE_DIM(ipv6.val.src_ip));
366 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
367 RTE_DIM(ipv6.val.dst_ip));
368 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
369 RTE_DIM(ipv6.mask.src_ip));
370 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
371 RTE_DIM(ipv6.mask.dst_ip));
372 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
373 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
/* Split vtc_flow into the Verbs flow_label and traffic_class fields. */
374 ipv6.val.flow_label =
375 rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
377 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
379 ipv6.val.next_hdr = spec->hdr.proto;
380 ipv6.val.hop_limit = spec->hdr.hop_limits;
381 ipv6.mask.flow_label =
382 rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
384 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
386 ipv6.mask.next_hdr = mask->hdr.proto;
387 ipv6.mask.hop_limit = mask->hdr.hop_limits;
388 /* Remove unwanted bits from values. */
389 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
390 ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
391 ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
393 ipv6.val.flow_label &= ipv6.mask.flow_label;
394 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
395 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
396 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
/* Enable IPv6 RSS hashing for this flow where applicable. */
398 dev_flow->verbs.hash_fields |=
399 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
400 MLX5_IPV6_LAYER_TYPES,
401 MLX5_IPV6_IBV_RX_HASH);
402 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
403 flow_verbs_spec_add(dev_flow, &ipv6, size);
407 * Convert the @p item into a Verbs specification. This function assumes that
408 * the input is valid and that there is space to insert the requested item
412 * Item specification.
413 * @param[in, out] item_flags
414 * Bit mask that marks all detected items.
415 * @param[in, out] dev_flow
416 * Pointer to specific flow structure.
419 flow_verbs_translate_item_udp(const struct rte_flow_item *item,
420 uint64_t *item_flags,
421 struct mlx5_flow *dev_flow)
423 const struct rte_flow_item_udp *spec = item->spec;
424 const struct rte_flow_item_udp *mask = item->mask;
425 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
/* UDP and TCP share the same Verbs spec layout. */
426 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
427 struct ibv_flow_spec_tcp_udp udp = {
428 .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
433 mask = &rte_flow_item_udp_mask;
434 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
435 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* Ports stay in network byte order as Verbs expects. */
437 udp.val.dst_port = spec->hdr.dst_port;
438 udp.val.src_port = spec->hdr.src_port;
439 udp.mask.dst_port = mask->hdr.dst_port;
440 udp.mask.src_port = mask->hdr.src_port;
441 /* Remove unwanted bits from values. */
442 udp.val.src_port &= udp.mask.src_port;
443 udp.val.dst_port &= udp.mask.dst_port;
445 dev_flow->verbs.hash_fields |=
446 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_UDP,
447 (IBV_RX_HASH_SRC_PORT_UDP |
448 IBV_RX_HASH_DST_PORT_UDP));
449 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
450 flow_verbs_spec_add(dev_flow, &udp, size);
454 * Convert the @p item into a Verbs specification. This function assumes that
455 * the input is valid and that there is space to insert the requested item
459 * Item specification.
460 * @param[in, out] item_flags
461 * Bit mask that marks all detected items.
462 * @param[in, out] dev_flow
463 * Pointer to specific flow structure.
466 flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
467 uint64_t *item_flags,
468 struct mlx5_flow *dev_flow)
470 const struct rte_flow_item_tcp *spec = item->spec;
471 const struct rte_flow_item_tcp *mask = item->mask;
/* NOTE(review): tunnel is taken from dev_flow->flow->layers while the
 * UDP translator uses *item_flags — confirm this is intentional. */
472 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
473 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
474 struct ibv_flow_spec_tcp_udp tcp = {
475 .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
480 mask = &rte_flow_item_tcp_mask;
481 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
482 MLX5_FLOW_LAYER_OUTER_L4_TCP;
/* Ports stay in network byte order as Verbs expects. */
484 tcp.val.dst_port = spec->hdr.dst_port;
485 tcp.val.src_port = spec->hdr.src_port;
486 tcp.mask.dst_port = mask->hdr.dst_port;
487 tcp.mask.src_port = mask->hdr.src_port;
488 /* Remove unwanted bits from values. */
489 tcp.val.src_port &= tcp.mask.src_port;
490 tcp.val.dst_port &= tcp.mask.dst_port;
492 dev_flow->verbs.hash_fields |=
493 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_TCP,
494 (IBV_RX_HASH_SRC_PORT_TCP |
495 IBV_RX_HASH_DST_PORT_TCP));
496 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
497 flow_verbs_spec_add(dev_flow, &tcp, size);
501 * Convert the @p item into a Verbs specification. This function assumes that
502 * the input is valid and that there is space to insert the requested item
506 * Item specification.
507 * @param[in, out] item_flags
508 * Bit mask that marks all detected items.
509 * @param[in, out] dev_flow
510 * Pointer to specific flow structure.
513 flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
514 uint64_t *item_flags,
515 struct mlx5_flow *dev_flow)
517 const struct rte_flow_item_vxlan *spec = item->spec;
518 const struct rte_flow_item_vxlan *mask = item->mask;
519 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
520 struct ibv_flow_spec_tunnel vxlan = {
521 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Union used to pack the 24-bit VNI into a 32-bit tunnel id. */
527 } id = { .vlan_id = 0, };
530 mask = &rte_flow_item_vxlan_mask;
/* VNI occupies the upper 3 bytes; byte 0 stays zero. */
532 memcpy(&id.vni[1], spec->vni, 3);
533 vxlan.val.tunnel_id = id.vlan_id;
534 memcpy(&id.vni[1], mask->vni, 3);
535 vxlan.mask.tunnel_id = id.vlan_id;
536 /* Remove unwanted bits from values. */
537 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
539 flow_verbs_spec_add(dev_flow, &vxlan, size);
540 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
541 *item_flags |= MLX5_FLOW_LAYER_VXLAN;
545 * Convert the @p item into a Verbs specification. This function assumes that
546 * the input is valid and that there is space to insert the requested item
550 * Item specification.
551 * @param[in, out] item_flags
552 * Bit mask that marks all detected items.
553 * @param[in, out] dev_flow
554 * Pointer to specific flow structure.
557 flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
558 uint64_t *item_flags,
559 struct mlx5_flow *dev_flow)
561 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
562 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
563 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
/* GPE is expressed with the plain VXLAN tunnel spec type in Verbs. */
564 struct ibv_flow_spec_tunnel vxlan_gpe = {
565 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Union used to pack the 24-bit VNI into a 32-bit tunnel id. */
571 } id = { .vlan_id = 0, };
574 mask = &rte_flow_item_vxlan_gpe_mask;
/* VNI occupies the upper 3 bytes; byte 0 stays zero. */
576 memcpy(&id.vni[1], spec->vni, 3);
577 vxlan_gpe.val.tunnel_id = id.vlan_id;
578 memcpy(&id.vni[1], mask->vni, 3);
579 vxlan_gpe.mask.tunnel_id = id.vlan_id;
580 /* Remove unwanted bits from values. */
581 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
583 flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
584 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
585 *item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
589 * Update the protocol in Verbs IPv4/IPv6 spec.
591 * @param[in, out] attr
592 * Pointer to Verbs attributes structure.
594 * Specification type to search in order to update the IP protocol.
595 * @param[in] protocol
596 * Protocol value to set if none is present in the specification.
599 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
600 enum ibv_flow_spec_type search,
/* Walk the spec list that follows the attribute header. */
604 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
605 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
609 for (i = 0; i != attr->num_of_specs; ++i) {
610 if (hdr->type == search) {
/* View the matched spec as either IPv4-ext or IPv6. */
612 struct ibv_flow_spec_ipv4_ext *ipv4;
613 struct ibv_flow_spec_ipv6 *ipv6;
617 case IBV_FLOW_SPEC_IPV4_EXT:
618 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
/* Only fill in the protocol if the user left it unset. */
619 if (!ip.ipv4->val.proto) {
620 ip.ipv4->val.proto = protocol;
621 ip.ipv4->mask.proto = 0xff;
624 case IBV_FLOW_SPEC_IPV6:
625 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
626 if (!ip.ipv6->val.next_hdr) {
627 ip.ipv6->val.next_hdr = protocol;
628 ip.ipv6->mask.next_hdr = 0xff;
/* Advance by the stored spec's own size (specs vary in length). */
636 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
641 * Convert the @p item into a Verbs specification. This function assumes that
642 * the input is valid and that there is space to insert the requested item
646 * Item specification.
647 * @param[in, out] item_flags
648 * Bit mask that marks all detected items.
649 * @param[in, out] dev_flow
650 * Pointer to specific flow structure.
653 flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
654 uint64_t *item_flags,
655 struct mlx5_flow *dev_flow)
657 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
/* Without MPLS support only a generic tunnel spec is available;
 * with it the dedicated GRE spec matches key/protocol fields too. */
658 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
659 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
660 struct ibv_flow_spec_tunnel tunnel = {
661 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
665 const struct rte_flow_item_gre *spec = item->spec;
666 const struct rte_flow_item_gre *mask = item->mask;
667 unsigned int size = sizeof(struct ibv_flow_spec_gre);
668 struct ibv_flow_spec_gre tunnel = {
669 .type = IBV_FLOW_SPEC_GRE,
674 mask = &rte_flow_item_gre_mask;
676 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
677 tunnel.val.protocol = spec->protocol;
678 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
679 tunnel.mask.protocol = mask->protocol;
680 /* Remove unwanted bits from values. */
681 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
682 tunnel.val.protocol &= tunnel.mask.protocol;
683 tunnel.val.key &= tunnel.mask.key;
/* Ensure the preceding IP spec matches IPPROTO_GRE if unset. */
686 if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
687 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
688 IBV_FLOW_SPEC_IPV4_EXT,
691 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
694 flow_verbs_spec_add(dev_flow, &tunnel, size);
695 verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
696 *item_flags |= MLX5_FLOW_LAYER_GRE;
700 * Convert the @p action into a Verbs specification. This function assumes that
701 * the input is valid and that there is space to insert the requested action
702 * into the flow. This function also return the action that was added.
705 * Item specification.
706 * @param[in, out] item_flags
707 * Bit mask that marks all detected items.
708 * @param[in, out] dev_flow
709 * Pointer to specific flow structure.
/* NOTE(review): the flags parameter is named action_flags but receives
 * item-layer bits (MLX5_FLOW_LAYER_MPLS) — naming looks inconsistent
 * with the other item translators; confirm against callers. */
712 flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
713 uint64_t *action_flags __rte_unused,
714 struct mlx5_flow *dev_flow __rte_unused)
/* Entire body is compiled out when MPLS Verbs support is missing. */
716 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
717 const struct rte_flow_item_mpls *spec = item->spec;
718 const struct rte_flow_item_mpls *mask = item->mask;
719 unsigned int size = sizeof(struct ibv_flow_spec_mpls);
720 struct ibv_flow_spec_mpls mpls = {
721 .type = IBV_FLOW_SPEC_MPLS,
726 mask = &rte_flow_item_mpls_mask;
728 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
729 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
730 /* Remove unwanted bits from values. */
731 mpls.val.label &= mpls.mask.label;
733 flow_verbs_spec_add(dev_flow, &mpls, size);
734 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
735 *action_flags |= MLX5_FLOW_LAYER_MPLS;
740 * Convert the @p action into a Verbs specification. This function assumes that
741 * the input is valid and that there is space to insert the requested action
742 * into the flow. This function also return the action that was added.
744 * @param[in, out] action_flags
745 * Pointer to the detected actions.
746 * @param[in] dev_flow
747 * Pointer to mlx5_flow.
/* Append a Verbs DROP action spec and record the action bit. */
750 flow_verbs_translate_action_drop(uint64_t *action_flags,
751 struct mlx5_flow *dev_flow)
753 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
754 struct ibv_flow_spec_action_drop drop = {
755 .type = IBV_FLOW_SPEC_ACTION_DROP,
759 flow_verbs_spec_add(dev_flow, &drop, size);
760 *action_flags |= MLX5_FLOW_ACTION_DROP;
764 * Convert the @p action into a Verbs specification. This function assumes that
765 * the input is valid and that there is space to insert the requested action
766 * into the flow. This function also return the action that was added.
769 * Action configuration.
770 * @param[in, out] action_flags
771 * Pointer to the detected actions.
772 * @param[in] dev_flow
773 * Pointer to mlx5_flow.
/* Record a single-queue fate action on the parent rte_flow;
 * QUEUE produces no Verbs spec, only PMD-side state. */
776 flow_verbs_translate_action_queue(const struct rte_flow_action *action,
777 uint64_t *action_flags,
778 struct mlx5_flow *dev_flow)
780 const struct rte_flow_action_queue *queue = action->conf;
781 struct rte_flow *flow = dev_flow->flow;
784 (*flow->queue)[0] = queue->index;
785 flow->rss.queue_num = 1;
786 *action_flags |= MLX5_FLOW_ACTION_QUEUE;
790 * Convert the @p action into a Verbs specification. This function assumes that
791 * the input is valid and that there is space to insert the requested action
792 * into the flow. This function also return the action that was added.
795 * Action configuration.
796 * @param[in, out] action_flags
797 * Pointer to the detected actions.
798 * @param[in] dev_flow
799 * Pointer to mlx5_flow.
/* Copy the RSS configuration (queues, key, types, level) onto the
 * parent rte_flow; RSS produces no Verbs spec, only PMD-side state. */
802 flow_verbs_translate_action_rss(const struct rte_flow_action *action,
803 uint64_t *action_flags,
804 struct mlx5_flow *dev_flow)
806 const struct rte_flow_action_rss *rss = action->conf;
807 struct rte_flow *flow = dev_flow->flow;
810 memcpy((*flow->queue), rss->queue,
811 rss->queue_num * sizeof(uint16_t));
812 flow->rss.queue_num = rss->queue_num;
/* Key length is fixed to MLX5_RSS_HASH_KEY_LEN by prior validation. */
813 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
814 flow->rss.types = rss->types;
815 flow->rss.level = rss->level;
816 *action_flags |= MLX5_FLOW_ACTION_RSS;
820 * Convert the @p action into a Verbs specification. This function assumes that
821 * the input is valid and that there is space to insert the requested action
822 * into the flow. This function also return the action that was added.
825 * Action configuration.
826 * @param[in, out] action_flags
827 * Pointer to the detected actions.
828 * @param[in] dev_flow
829 * Pointer to mlx5_flow.
/* Translate FLAG into a Verbs TAG spec carrying the default mark id. */
832 flow_verbs_translate_action_flag
833 (const struct rte_flow_action *action __rte_unused,
834 uint64_t *action_flags,
835 struct mlx5_flow *dev_flow)
837 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
838 struct ibv_flow_spec_action_tag tag = {
839 .type = IBV_FLOW_SPEC_ACTION_TAG,
841 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
/* FLAG is recorded as a MARK action with the default id. */
843 *action_flags |= MLX5_FLOW_ACTION_MARK;
844 flow_verbs_spec_add(dev_flow, &tag, size);
848 * Update verbs specification to modify the flag to mark.
850 * @param[in, out] verbs
851 * Pointer to the mlx5_flow_verbs structure.
853 * Mark identifier to replace the flag.
856 flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
858 struct ibv_spec_header *hdr;
863 /* Update Verbs specification. */
864 hdr = (struct ibv_spec_header *)verbs->specs;
/* Scan existing specs and rewrite the tag of any TAG action found. */
867 for (i = 0; i != verbs->attr->num_of_specs; ++i) {
868 if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
869 struct ibv_flow_spec_action_tag *t =
870 (struct ibv_flow_spec_action_tag *)hdr;
872 t->tag_id = mlx5_flow_mark_set(mark_id);
/* Advance by the stored spec's own size (specs vary in length). */
874 hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
879 * Convert the @p action into a Verbs specification. This function assumes that
880 * the input is valid and that there is space to insert the requested action
881 * into the flow. This function also return the action that was added.
884 * Action configuration.
885 * @param[in, out] action_flags
886 * Pointer to the detected actions.
887 * @param[in] dev_flow
888 * Pointer to mlx5_flow.
891 flow_verbs_translate_action_mark(const struct rte_flow_action *action,
892 uint64_t *action_flags,
893 struct mlx5_flow *dev_flow)
895 const struct rte_flow_action_mark *mark = action->conf;
896 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
897 struct ibv_flow_spec_action_tag tag = {
898 .type = IBV_FLOW_SPEC_ACTION_TAG,
901 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
/* If a FLAG action already inserted a TAG spec, just update its id
 * instead of appending a second TAG spec. */
903 if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
904 flow_verbs_mark_update(verbs, mark->id);
907 tag.tag_id = mlx5_flow_mark_set(mark->id);
908 flow_verbs_spec_add(dev_flow, &tag, size);
910 *action_flags |= MLX5_FLOW_ACTION_MARK;
914 * Convert the @p action into a Verbs specification. This function assumes that
915 * the input is valid and that there is space to insert the requested action
916 * into the flow. This function also return the action that was added.
919 * Pointer to the Ethernet device structure.
921 * Action configuration.
922 * @param[in, out] action_flags
923 * Pointer to the detected actions.
924 * @param[in] dev_flow
925 * Pointer to mlx5_flow.
927 * Pointer to error structure.
930 * 0 On success else a negative errno value is returned and rte_errno is set.
933 flow_verbs_translate_action_count(struct rte_eth_dev *dev,
934 const struct rte_flow_action *action,
935 uint64_t *action_flags,
936 struct mlx5_flow *dev_flow,
937 struct rte_flow_error *error)
939 const struct rte_flow_action_count *count = action->conf;
940 struct rte_flow *flow = dev_flow->flow;
/* The Verbs counter spec only exists with this extension. */
941 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_V42
942 unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
943 struct ibv_flow_spec_counter_action counter = {
944 .type = IBV_FLOW_SPEC_ACTION_COUNT,
/* Allocate (or reuse a shared) counter on first use for this flow. */
949 if (!flow->counter) {
950 flow->counter = flow_verbs_counter_new(dev, count->shared,
953 return rte_flow_error_set(error, rte_errno,
954 RTE_FLOW_ERROR_TYPE_ACTION,
959 *action_flags |= MLX5_FLOW_ACTION_COUNT;
960 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_V42
/* Attach the counter-set handle so hardware counts this flow. */
961 counter.counter_set_handle = flow->counter->cs->handle;
962 flow_verbs_spec_add(dev_flow, &counter, size);
968 * Internal validation function. For validating both actions and items.
971 * Pointer to the Ethernet device structure.
973 * Pointer to the flow attributes.
975 * Pointer to the list of items.
977 * Pointer to the list of actions.
979 * Pointer to the error structure.
982 * 0 on success, a negative errno value otherwise and rte_errno is set.
985 flow_verbs_validate(struct rte_eth_dev *dev,
986 const struct rte_flow_attr *attr,
987 const struct rte_flow_item items[],
988 const struct rte_flow_action actions[],
989 struct rte_flow_error *error)
/* NOTE(review): flag accumulators are 32-bit here while the translate
 * path passes uint64_t * — fine while flag values fit 32 bits, but
 * worth confirming for consistency. */
992 uint32_t action_flags = 0;
993 uint32_t item_flags = 0;
/* 0xff = "no L3 next protocol seen yet" sentinel. */
995 uint8_t next_protocol = 0xff;
999 ret = mlx5_flow_validate_attributes(dev, attr, error);
/* First pass: validate every pattern item and accumulate layer bits. */
1002 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1004 switch (items->type) {
1005 case RTE_FLOW_ITEM_TYPE_VOID:
1007 case RTE_FLOW_ITEM_TYPE_ETH:
1008 ret = mlx5_flow_validate_item_eth(items, item_flags,
1012 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1013 MLX5_FLOW_LAYER_OUTER_L2;
1015 case RTE_FLOW_ITEM_TYPE_VLAN:
1016 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1020 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1021 MLX5_FLOW_LAYER_OUTER_VLAN;
1023 case RTE_FLOW_ITEM_TYPE_IPV4:
1024 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1028 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1029 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Capture the L4 protocol for later GRE/MPLS compatibility checks. */
1030 if (items->mask != NULL &&
1031 ((const struct rte_flow_item_ipv4 *)
1032 items->mask)->hdr.next_proto_id)
1034 ((const struct rte_flow_item_ipv4 *)
1035 (items->spec))->hdr.next_proto_id;
1037 case RTE_FLOW_ITEM_TYPE_IPV6:
1038 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1042 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1043 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1044 if (items->mask != NULL &&
1045 ((const struct rte_flow_item_ipv6 *)
1046 items->mask)->hdr.proto)
1048 ((const struct rte_flow_item_ipv6 *)
1049 items->spec)->hdr.proto;
1051 case RTE_FLOW_ITEM_TYPE_UDP:
1052 ret = mlx5_flow_validate_item_udp(items, item_flags,
1057 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1058 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1060 case RTE_FLOW_ITEM_TYPE_TCP:
1061 ret = mlx5_flow_validate_item_tcp
1064 &rte_flow_item_tcp_mask,
1068 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1069 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1071 case RTE_FLOW_ITEM_TYPE_VXLAN:
1072 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1076 item_flags |= MLX5_FLOW_LAYER_VXLAN;
1078 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1079 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1084 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1086 case RTE_FLOW_ITEM_TYPE_GRE:
1087 ret = mlx5_flow_validate_item_gre(items, item_flags,
1088 next_protocol, error);
1091 item_flags |= MLX5_FLOW_LAYER_GRE;
1093 case RTE_FLOW_ITEM_TYPE_MPLS:
1094 ret = mlx5_flow_validate_item_mpls(items, item_flags,
/* MPLS must follow an IP header whose protocol is MPLS (or unset). */
1099 if (next_protocol != 0xff &&
1100 next_protocol != IPPROTO_MPLS)
1101 return rte_flow_error_set
1103 RTE_FLOW_ERROR_TYPE_ITEM, items,
1104 "protocol filtering not compatible"
1105 " with MPLS layer");
1106 item_flags |= MLX5_FLOW_LAYER_MPLS;
1109 return rte_flow_error_set(error, ENOTSUP,
1110 RTE_FLOW_ERROR_TYPE_ITEM,
1111 NULL, "item not supported");
/* Second pass: validate the actions against the detected layers. */
1114 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1115 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1116 switch (actions->type) {
1117 case RTE_FLOW_ACTION_TYPE_VOID:
1119 case RTE_FLOW_ACTION_TYPE_FLAG:
1120 ret = mlx5_flow_validate_action_flag(action_flags,
1125 action_flags |= MLX5_FLOW_ACTION_FLAG;
1127 case RTE_FLOW_ACTION_TYPE_MARK:
1128 ret = mlx5_flow_validate_action_mark(actions,
1134 action_flags |= MLX5_FLOW_ACTION_MARK;
1136 case RTE_FLOW_ACTION_TYPE_DROP:
1137 ret = mlx5_flow_validate_action_drop(action_flags,
1142 action_flags |= MLX5_FLOW_ACTION_DROP;
1144 case RTE_FLOW_ACTION_TYPE_QUEUE:
1145 ret = mlx5_flow_validate_action_queue(actions,
1151 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1153 case RTE_FLOW_ACTION_TYPE_RSS:
1154 ret = mlx5_flow_validate_action_rss(actions,
1160 action_flags |= MLX5_FLOW_ACTION_RSS;
1162 case RTE_FLOW_ACTION_TYPE_COUNT:
1163 ret = mlx5_flow_validate_action_count(dev, attr, error);
1166 action_flags |= MLX5_FLOW_ACTION_COUNT;
1169 return rte_flow_error_set(error, ENOTSUP,
1170 RTE_FLOW_ERROR_TYPE_ACTION,
1172 "action not supported");
/* Every flow must have exactly one fate action (drop/queue/rss). */
1175 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1176 return rte_flow_error_set(error, EINVAL,
1177 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1178 "no fate action is found");
1183 * Calculate the required bytes that are needed for the action part of the verbs
1184 * flow; in addition, returns bit-fields with all the detected actions, in order to
1185 * avoid another iteration over the actions.
1187 * @param[in] actions
1188 * Pointer to the list of actions.
1189 * @param[out] action_flags
1190 * Pointer to the detected actions.
1193 * The size of the memory needed for all actions.
1196 flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
1197 uint64_t *action_flags)
1200 uint64_t detected_actions = 0;
/* Sum the Verbs spec bytes per action and collect action bits. */
1202 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1203 switch (actions->type) {
1204 case RTE_FLOW_ACTION_TYPE_VOID:
1206 case RTE_FLOW_ACTION_TYPE_FLAG:
1207 size += sizeof(struct ibv_flow_spec_action_tag);
1208 detected_actions |= MLX5_FLOW_ACTION_FLAG;
1210 case RTE_FLOW_ACTION_TYPE_MARK:
1211 size += sizeof(struct ibv_flow_spec_action_tag);
1212 detected_actions |= MLX5_FLOW_ACTION_MARK;
1214 case RTE_FLOW_ACTION_TYPE_DROP:
1215 size += sizeof(struct ibv_flow_spec_action_drop);
1216 detected_actions |= MLX5_FLOW_ACTION_DROP;
/* QUEUE and RSS are PMD-side state only: no Verbs spec bytes. */
1218 case RTE_FLOW_ACTION_TYPE_QUEUE:
1219 detected_actions |= MLX5_FLOW_ACTION_QUEUE;
1221 case RTE_FLOW_ACTION_TYPE_RSS:
1222 detected_actions |= MLX5_FLOW_ACTION_RSS;
1224 case RTE_FLOW_ACTION_TYPE_COUNT:
/* Counter spec only exists with the counters-set extension. */
1225 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_V42
1226 size += sizeof(struct ibv_flow_spec_counter_action);
1228 detected_actions |= MLX5_FLOW_ACTION_COUNT;
1234 *action_flags = detected_actions;
1239 * Calculate the required bytes that are needed for the item part of the verbs
1240 * flow, in addition returns bit-fields with all the detected items, in order
1241 * to avoid another iteration over the items.
1243 * @param[in] items
1244 * Pointer to the list of items.
1245 * @param[in, out] item_flags
1246 * Pointer to the detected items.
1249 * The size of the memory needed for all items.
1252 flow_verbs_get_items_and_size(const struct rte_flow_item items[],
1253 uint64_t *item_flags)
1256 uint64_t detected_items = 0;
/* The caller's flags are read here to know whether we are already inside a tunnel. */
1257 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
1259 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1260 switch (items->type) {
1261 case RTE_FLOW_ITEM_TYPE_VOID:
1263 case RTE_FLOW_ITEM_TYPE_ETH:
1264 size += sizeof(struct ibv_flow_spec_eth);
1265 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1266 MLX5_FLOW_LAYER_OUTER_L2;
1268 case RTE_FLOW_ITEM_TYPE_VLAN:
/* VLAN matching reuses the Ethernet verbs spec. */
1269 size += sizeof(struct ibv_flow_spec_eth);
1270 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1271 MLX5_FLOW_LAYER_OUTER_VLAN;
1273 case RTE_FLOW_ITEM_TYPE_IPV4:
1274 size += sizeof(struct ibv_flow_spec_ipv4_ext);
1275 detected_items |= tunnel ?
1276 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1277 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1279 case RTE_FLOW_ITEM_TYPE_IPV6:
1280 size += sizeof(struct ibv_flow_spec_ipv6);
1281 detected_items |= tunnel ?
1282 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1283 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1285 case RTE_FLOW_ITEM_TYPE_UDP:
/* UDP and TCP share the same verbs L4 spec layout. */
1286 size += sizeof(struct ibv_flow_spec_tcp_udp);
1287 detected_items |= tunnel ?
1288 MLX5_FLOW_LAYER_INNER_L4_UDP :
1289 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1291 case RTE_FLOW_ITEM_TYPE_TCP:
1292 size += sizeof(struct ibv_flow_spec_tcp_udp);
1293 detected_items |= tunnel ?
1294 MLX5_FLOW_LAYER_INNER_L4_TCP :
1295 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1297 case RTE_FLOW_ITEM_TYPE_VXLAN:
1298 size += sizeof(struct ibv_flow_spec_tunnel);
1299 detected_items |= MLX5_FLOW_LAYER_VXLAN;
1301 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1302 size += sizeof(struct ibv_flow_spec_tunnel);
1303 detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
1305 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
/* With MPLS support GRE gets a dedicated spec type and MPLS is matchable. */
1306 case RTE_FLOW_ITEM_TYPE_GRE:
1307 size += sizeof(struct ibv_flow_spec_gre);
1308 detected_items |= MLX5_FLOW_LAYER_GRE;
1310 case RTE_FLOW_ITEM_TYPE_MPLS:
1311 size += sizeof(struct ibv_flow_spec_mpls);
1312 detected_items |= MLX5_FLOW_LAYER_MPLS;
/* NOTE(review): second GRE case is the fallback without MPLS support (generic
 * tunnel spec); the "#else" branch is not visible in this view -- confirm. */
1315 case RTE_FLOW_ITEM_TYPE_GRE:
1316 size += sizeof(struct ibv_flow_spec_tunnel);
1317 detected_items |= MLX5_FLOW_LAYER_TUNNEL;
/* Report every item seen back to the caller so it need not re-walk the list. */
1324 *item_flags = detected_items;
1329 * Internal preparation function. Allocate mlx5_flow with the required size.
1330 * The required size is calculated based on the actions and items. This function
1331 * also returns the detected actions and items for later use.
1334 * Pointer to the flow attributes.
1336 * Pointer to the list of items.
1337 * @param[in] actions
1338 * Pointer to the list of actions.
1339 * @param[out] item_flags
1340 * Pointer to bit mask of all items detected.
1341 * @param[out] action_flags
1342 * Pointer to bit mask of all actions detected.
1344 * Pointer to the error structure.
1347 * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1350 static struct mlx5_flow *
1351 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1352 const struct rte_flow_item items[],
1353 const struct rte_flow_action actions[],
1354 uint64_t *item_flags,
1355 uint64_t *action_flags,
1356 struct rte_flow_error *error)
/* Single allocation: mlx5_flow, then ibv_flow_attr, then all verbs specs. */
1358 uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1359 struct mlx5_flow *flow;
/* Sizing passes also fill the detected action/item flags for the caller. */
1361 size += flow_verbs_get_actions_and_size(actions, action_flags);
1362 size += flow_verbs_get_items_and_size(items, item_flags);
1363 flow = rte_calloc(__func__, 1, size, 0);
1365 rte_flow_error_set(error, ENOMEM,
1366 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1367 "not enough memory to create flow");
/* attr lives immediately after the mlx5_flow header, specs right after attr. */
1370 flow->verbs.attr = (void *)(flow + 1);
1372 (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1377 * Fill the flow with verb spec.
1380 * Pointer to Ethernet device.
1381 * @param[in, out] dev_flow
1382 * Pointer to the mlx5 flow.
1384 * Pointer to the flow attributes.
1386 * Pointer to the list of items.
1387 * @param[in] actions
1388 * Pointer to the list of actions.
1390 * Pointer to the error structure.
1393 * 0 on success, else a negative errno value otherwise and rte_errno is set.
1396 flow_verbs_translate(struct rte_eth_dev *dev,
1397 struct mlx5_flow *dev_flow,
1398 const struct rte_flow_attr *attr,
1399 const struct rte_flow_item items[],
1400 const struct rte_flow_action actions[],
1401 struct rte_flow_error *error)
1403 uint64_t action_flags = 0;
1404 uint64_t item_flags = 0;
1405 uint64_t priority = attr->priority;
1406 struct priv *priv = dev->data->dev_private;
/* The reserved priority value means "lowest": map it to the last priority. */
1408 if (priority == MLX5_FLOW_PRIO_RSVD)
1409 priority = priv->config.flow_prio - 1;
/* First pass: translate every action into its verbs spec. */
1410 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1412 switch (actions->type) {
1413 case RTE_FLOW_ACTION_TYPE_VOID:
1415 case RTE_FLOW_ACTION_TYPE_FLAG:
1416 flow_verbs_translate_action_flag(actions,
1420 case RTE_FLOW_ACTION_TYPE_MARK:
1421 flow_verbs_translate_action_mark(actions,
1425 case RTE_FLOW_ACTION_TYPE_DROP:
1426 flow_verbs_translate_action_drop(&action_flags,
1429 case RTE_FLOW_ACTION_TYPE_QUEUE:
1430 flow_verbs_translate_action_queue(actions,
1434 case RTE_FLOW_ACTION_TYPE_RSS:
1435 flow_verbs_translate_action_rss(actions,
1439 case RTE_FLOW_ACTION_TYPE_COUNT:
1440 ret = flow_verbs_translate_action_count(dev,
1449 return rte_flow_error_set(error, ENOTSUP,
1450 RTE_FLOW_ERROR_TYPE_ACTION,
1452 "action not supported");
/* Record the translated actions on the parent rte_flow. */
1455 dev_flow->flow->actions |= action_flags;
/* Second pass: translate every pattern item into its verbs spec. */
1456 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1457 switch (items->type) {
1458 case RTE_FLOW_ITEM_TYPE_VOID:
1460 case RTE_FLOW_ITEM_TYPE_ETH:
1461 flow_verbs_translate_item_eth(items, &item_flags,
1464 case RTE_FLOW_ITEM_TYPE_VLAN:
1465 flow_verbs_translate_item_vlan(items, &item_flags,
1468 case RTE_FLOW_ITEM_TYPE_IPV4:
1469 flow_verbs_translate_item_ipv4(items, &item_flags,
1472 case RTE_FLOW_ITEM_TYPE_IPV6:
1473 flow_verbs_translate_item_ipv6(items, &item_flags,
1476 case RTE_FLOW_ITEM_TYPE_UDP:
1477 flow_verbs_translate_item_udp(items, &item_flags,
1480 case RTE_FLOW_ITEM_TYPE_TCP:
1481 flow_verbs_translate_item_tcp(items, &item_flags,
1484 case RTE_FLOW_ITEM_TYPE_VXLAN:
1485 flow_verbs_translate_item_vxlan(items, &item_flags,
1488 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1489 flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
1492 case RTE_FLOW_ITEM_TYPE_GRE:
1493 flow_verbs_translate_item_gre(items, &item_flags,
1496 case RTE_FLOW_ITEM_TYPE_MPLS:
1497 flow_verbs_translate_item_mpls(items, &item_flags,
1501 return rte_flow_error_set(error, ENOTSUP,
1502 RTE_FLOW_ERROR_TYPE_ITEM,
1504 "item not supported");
/* Finally adjust the verbs priority to the device's priority scheme. */
1507 dev_flow->verbs.attr->priority =
1508 mlx5_flow_adjust_priority(dev, priority,
1509 dev_flow->verbs.attr->priority);
1514 * Remove the flow from the NIC but keeps it in memory.
1517 * Pointer to the Ethernet device structure.
1518 * @param[in, out] flow
1519 * Pointer to flow structure.
1522 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1524 struct mlx5_flow_verbs *verbs;
1525 struct mlx5_flow *dev_flow;
/* Destroy every device sub-flow and release its hash Rx queue. */
1529 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1530 verbs = &dev_flow->verbs;
1532 claim_zero(mlx5_glue->destroy_flow(verbs->flow));
/* Drop flows hold the shared drop queue; others hold a regular hrxq. */
1536 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1537 mlx5_hrxq_drop_release(dev);
1539 mlx5_hrxq_release(dev, verbs->hrxq);
/* Release the flow counter reference, if any was attached. */
1543 if (flow->counter) {
1544 flow_verbs_counter_release(flow->counter);
1545 flow->counter = NULL;
1550 * Remove the flow from the NIC and the memory.
1553 * Pointer to the Ethernet device structure.
1554 * @param[in, out] flow
1555 * Pointer to flow structure.
1558 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1560 struct mlx5_flow *dev_flow;
/* First detach from the NIC, then free every device sub-flow. */
1564 flow_verbs_remove(dev, flow);
1565 while (!LIST_EMPTY(&flow->dev_flows)) {
1566 dev_flow = LIST_FIRST(&flow->dev_flows);
1567 LIST_REMOVE(dev_flow, next);
1573 * Apply the flow to the NIC.
1576 * Pointer to the Ethernet device structure.
1577 * @param[in, out] flow
1578 * Pointer to flow structure.
1580 * Pointer to error structure.
1583 * 0 on success, a negative errno value otherwise and rte_errno is set.
1586 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1587 struct rte_flow_error *error)
1589 struct mlx5_flow_verbs *verbs;
1590 struct mlx5_flow *dev_flow;
/* Attach each device sub-flow: pick a fate queue, then create the verbs flow. */
1593 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1594 verbs = &dev_flow->verbs;
1595 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Drop flows target the shared drop hash Rx queue. */
1596 verbs->hrxq = mlx5_hrxq_drop_new(dev);
1600 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1601 "cannot get drop hash queue");
1605 struct mlx5_hrxq *hrxq;
/* Reuse a matching hash Rx queue if one exists, otherwise create it. */
1607 hrxq = mlx5_hrxq_get(dev, flow->key,
1608 MLX5_RSS_HASH_KEY_LEN,
1611 flow->rss.queue_num);
1613 hrxq = mlx5_hrxq_new(dev, flow->key,
1614 MLX5_RSS_HASH_KEY_LEN,
1617 flow->rss.queue_num,
1619 MLX5_FLOW_LAYER_TUNNEL));
1623 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1624 "cannot get hash queue");
1629 verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1632 rte_flow_error_set(error, errno,
1633 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1635 "hardware refuses to create flow");
/* Error path: unwind already-attached sub-flows without losing rte_errno. */
1641 err = rte_errno; /* Save rte_errno before cleanup. */
1642 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1643 verbs = &dev_flow->verbs;
1645 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1646 mlx5_hrxq_drop_release(dev);
1648 mlx5_hrxq_release(dev, verbs->hrxq);
1652 rte_errno = err; /* Restore rte_errno. */
1659 * @see rte_flow_query()
1663 flow_verbs_query_count(struct rte_eth_dev *dev __rte_unused,
1664 struct rte_flow *flow __rte_unused,
1665 void *data __rte_unused,
1666 struct rte_flow_error *error)
1668 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_V42
1669 if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
1670 struct rte_flow_query_count *qc = data;
/* counters[0] carries hits, counters[1] carries bytes (see below). */
1671 uint64_t counters[2] = {0, 0};
1672 struct ibv_query_counter_set_attr query_cs_attr = {
1673 .cs = flow->counter->cs,
1674 .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
1676 struct ibv_counter_set_data query_out = {
1678 .outlen = 2 * sizeof(uint64_t),
1680 int err = mlx5_glue->query_counter_set(&query_cs_attr,
1684 return rte_flow_error_set
1686 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1688 "cannot read counter");
/* Report the delta since the last reset, then cache the raw values. */
1691 qc->hits = counters[0] - flow->counter->hits;
1692 qc->bytes = counters[1] - flow->counter->bytes;
1694 flow->counter->hits = counters[0];
1695 flow->counter->bytes = counters[1];
1699 return rte_flow_error_set(error, EINVAL,
1700 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1702 "flow does not have counter");
/* Compiled without counter support: always report ENOTSUP. */
1704 return rte_flow_error_set(error, ENOTSUP,
1705 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1707 "counters are not available");
1713 * @see rte_flow_query()
1717 flow_verbs_query(struct rte_eth_dev *dev,
1718 struct rte_flow *flow,
1719 const struct rte_flow_action *actions,
1721 struct rte_flow_error *error)
/* Dispatch each query action; only COUNT is supported by this driver. */
1725 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1726 switch (actions->type) {
1727 case RTE_FLOW_ACTION_TYPE_VOID:
1729 case RTE_FLOW_ACTION_TYPE_COUNT:
1730 ret = flow_verbs_query_count(dev, flow, data, error);
1733 return rte_flow_error_set(error, ENOTSUP,
1734 RTE_FLOW_ERROR_TYPE_ACTION,
1736 "action not supported");
1742 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1743 .validate = flow_verbs_validate,
1744 .prepare = flow_verbs_prepare,
1745 .translate = flow_verbs_translate,
1746 .apply = flow_verbs_apply,
1747 .remove = flow_verbs_remove,
1748 .destroy = flow_verbs_destroy,
1749 .query = flow_verbs_query,