/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#define VERBS_SPEC_INNER(item_flags) \
        (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
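
/*
 * Example: once a VXLAN/VXLAN-GPE/GRE/MPLS layer has been parsed, item_flags
 * carries one of the MLX5_FLOW_LAYER_TUNNEL bits, so every spec built after
 * that point gets a type such as IBV_FLOW_SPEC_ETH | IBV_FLOW_SPEC_INNER and
 * matches the inner header instead of the outer one.
 */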

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 On success else a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
                          struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct ibv_context *ctx = priv->sh->ctx;
        struct ibv_counter_set_init_attr init = {
                        .counter_set_id = counter->id};

        counter->cs = mlx5_glue->create_counter_set(ctx, &init);
        if (!counter->cs) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct ibv_context *ctx = priv->sh->ctx;
        struct ibv_counters_init_attr init = {0};
        struct ibv_counter_attach_attr attach;
        int ret;

        memset(&attach, 0, sizeof(attach));
        counter->cs = mlx5_glue->create_counters(ctx, &init);
        if (!counter->cs) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        attach.counter_desc = IBV_COUNTER_PACKETS;
        attach.index = 0;
        ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
        if (!ret) {
                attach.counter_desc = IBV_COUNTER_BYTES;
                attach.index = 1;
                ret = mlx5_glue->attach_counters
                                        (counter->cs, &attach, NULL);
        }
        if (ret) {
                claim_zero(mlx5_glue->destroy_counters(counter->cs));
                counter->cs = NULL;
                rte_errno = ret;
                return -ret;
        }
        return 0;
#else
        (void)dev;
        (void)counter;
        rte_errno = ENOTSUP;
        return -ENOTSUP;
#endif
}
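
/*
 * The two preprocessor branches above track the two Verbs counter APIs:
 * HAVE_IBV_DEVICE_COUNTERS_SET_V42 selects the older counter-set interface
 * (one handle per set), while HAVE_IBV_DEVICE_COUNTERS_SET_V45 selects the
 * newer ibv_counters interface, where the packet and byte counters are
 * attached at explicit indices (0 and 1 here) and are read back in the same
 * order by flow_verbs_counter_query().
 */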

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   A pointer to the counter, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_counter *cnt;
        int ret;

        if (shared) {
                TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
                        if (cnt->shared && cnt->id == id) {
                                cnt->ref_cnt++;
                                return cnt;
                        }
                }
        }
        cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
        if (!cnt) {
                rte_errno = ENOMEM;
                return NULL;
        }
        cnt->id = id;
        cnt->shared = shared;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        cnt->bytes = 0;
        /* Create counter with Verbs. */
        ret = flow_verbs_counter_create(dev, cnt);
        if (!ret) {
                TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
                return cnt;
        }
        /* Some error occurred in Verbs library. */
        rte_free(cnt);
        rte_errno = -ret;
        return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev,
                           struct mlx5_flow_counter *counter)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (--counter->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
                claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
                claim_zero(mlx5_glue->destroy_counters(counter->cs));
#endif
                TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
                rte_free(counter);
        }
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
                         struct rte_flow *flow, void *data,
                         struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
        defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
                struct rte_flow_query_count *qc = data;
                uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
                struct ibv_query_counter_set_attr query_cs_attr = {
                        .cs = flow->counter->cs,
                        .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
                };
                struct ibv_counter_set_data query_out = {
                        .out = counters,
                        .outlen = 2 * sizeof(uint64_t),
                };
                int err = mlx5_glue->query_counter_set(&query_cs_attr,
                                                       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
                int err = mlx5_glue->query_counters
                               (flow->counter->cs, counters,
                                RTE_DIM(counters),
                                IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
                if (err)
                        return rte_flow_error_set
                                (error, err,
                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                 NULL,
                                 "cannot read counter");
                qc->hits_set = 1;
                qc->bytes_set = 1;
                qc->hits = counters[0] - flow->counter->hits;
                qc->bytes = counters[1] - flow->counter->bytes;
                if (qc->reset) {
                        flow->counter->hits = counters[0];
                        flow->counter->bytes = counters[1];
                }
                return 0;
        }
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL,
                                  "flow does not have counter");
#else
        (void)flow;
        (void)data;
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL,
                                  "counters are not available");
#endif
}
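
/*
 * The hits/bytes reported above are deltas: counter->hits and counter->bytes
 * hold the absolute hardware readings captured at the last reset, and a
 * query with qc->reset set moves that baseline forward.
 */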

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
{
        void *dst;

        if (!verbs)
                return;
        assert(verbs->specs);
        dst = (void *)(verbs->specs + verbs->size);
        memcpy(dst, src, size);
        ++verbs->attr->num_of_specs;
        verbs->size += size;
}
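
/*
 * Specs accumulate back to back right after the ibv_flow_attr header, in
 * insertion order, e.g. for an ETH / IPV4 / UDP pattern:
 *
 *   [ibv_flow_attr][ibv_flow_spec_eth][ibv_flow_spec_ipv4_ext]
 *                                     [ibv_flow_spec_tcp_udp]
 *
 * which is the contiguous layout ibv_create_flow() expects.
 */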

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
                              const struct rte_flow_item *item,
                              uint64_t item_flags)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        const unsigned int size = sizeof(struct ibv_flow_spec_eth);
        struct ibv_flow_spec_eth eth = {
                .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_eth_mask;
        if (spec) {
                unsigned int i;

                memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
                        RTE_ETHER_ADDR_LEN);
                memcpy(&eth.val.src_mac, spec->src.addr_bytes,
                        RTE_ETHER_ADDR_LEN);
                eth.val.ether_type = spec->type;
                memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
                        RTE_ETHER_ADDR_LEN);
                memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
                        RTE_ETHER_ADDR_LEN);
                eth.mask.ether_type = mask->type;
                /* Remove unwanted bits from values. */
                for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
                        eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
                        eth.val.src_mac[i] &= eth.mask.src_mac[i];
                }
                eth.val.ether_type &= eth.mask.ether_type;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
                            struct ibv_flow_spec_eth *eth)
{
        unsigned int i;
        const enum ibv_flow_spec_type search = eth->type;
        struct ibv_spec_header *hdr = (struct ibv_spec_header *)
                ((uint8_t *)attr + sizeof(struct ibv_flow_attr));

        for (i = 0; i != attr->num_of_specs; ++i) {
                if (hdr->type == search) {
                        struct ibv_flow_spec_eth *e =
                                (struct ibv_flow_spec_eth *)hdr;

                        e->val.vlan_tag = eth->val.vlan_tag;
                        e->mask.vlan_tag = eth->mask.vlan_tag;
                        e->val.ether_type = eth->val.ether_type;
                        e->mask.ether_type = eth->mask.ether_type;
                        break;
                }
                hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
        }
}
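
/*
 * Verbs has no stand-alone VLAN spec: the TCI is matched through the
 * vlan_tag field of the Ethernet spec. When an Ethernet spec already exists
 * for this layer, the VLAN item therefore patches it in place (see
 * flow_verbs_translate_item_vlan() below) instead of appending a new spec.
 */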

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
                               const struct rte_flow_item *item,
                               uint64_t item_flags)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_eth);
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        struct ibv_flow_spec_eth eth = {
                .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };
        const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                      MLX5_FLOW_LAYER_OUTER_L2;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        if (spec) {
                eth.val.vlan_tag = spec->tci;
                eth.mask.vlan_tag = mask->tci;
                eth.val.vlan_tag &= eth.mask.vlan_tag;
                eth.val.ether_type = spec->inner_type;
                eth.mask.ether_type = mask->inner_type;
                eth.val.ether_type &= eth.mask.ether_type;
        }
        if (!(item_flags & l2m))
                flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
        else
                flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
                               const struct rte_flow_item *item,
                               uint64_t item_flags)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
        struct ibv_flow_spec_ipv4_ext ipv4 = {
                .type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        if (spec) {
                ipv4.val = (struct ibv_flow_ipv4_ext_filter){
                        .src_ip = spec->hdr.src_addr,
                        .dst_ip = spec->hdr.dst_addr,
                        .proto = spec->hdr.next_proto_id,
                        .tos = spec->hdr.type_of_service,
                };
                ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
                        .src_ip = mask->hdr.src_addr,
                        .dst_ip = mask->hdr.dst_addr,
                        .proto = mask->hdr.next_proto_id,
                        .tos = mask->hdr.type_of_service,
                };
                /* Remove unwanted bits from values. */
                ipv4.val.src_ip &= ipv4.mask.src_ip;
                ipv4.val.dst_ip &= ipv4.mask.dst_ip;
                ipv4.val.proto &= ipv4.mask.proto;
                ipv4.val.tos &= ipv4.mask.tos;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
                               const struct rte_flow_item *item,
                               uint64_t item_flags)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
        struct ibv_flow_spec_ipv6 ipv6 = {
                .type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        if (spec) {
                unsigned int i;
                uint32_t vtc_flow_val;
                uint32_t vtc_flow_mask;

                memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
                       RTE_DIM(ipv6.val.src_ip));
                memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
                       RTE_DIM(ipv6.val.dst_ip));
                memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
                       RTE_DIM(ipv6.mask.src_ip));
                memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
                       RTE_DIM(ipv6.mask.dst_ip));
                vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
                vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
                ipv6.val.flow_label =
                        rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
                                         RTE_IPV6_HDR_FL_SHIFT);
                ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
                                         RTE_IPV6_HDR_TC_SHIFT;
                ipv6.val.next_hdr = spec->hdr.proto;
                ipv6.val.hop_limit = spec->hdr.hop_limits;
                ipv6.mask.flow_label =
                        rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
                                         RTE_IPV6_HDR_FL_SHIFT);
                ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
                                          RTE_IPV6_HDR_TC_SHIFT;
                ipv6.mask.next_hdr = mask->hdr.proto;
                ipv6.mask.hop_limit = mask->hdr.hop_limits;
                /* Remove unwanted bits from values. */
                for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
                        ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
                        ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
                }
                ipv6.val.flow_label &= ipv6.mask.flow_label;
                ipv6.val.traffic_class &= ipv6.mask.traffic_class;
                ipv6.val.next_hdr &= ipv6.mask.next_hdr;
                ipv6.val.hop_limit &= ipv6.mask.hop_limit;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
                              const struct rte_flow_item *item,
                              uint64_t item_flags __rte_unused)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
        struct ibv_flow_spec_tcp_udp tcp = {
                .type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        if (spec) {
                tcp.val.dst_port = spec->hdr.dst_port;
                tcp.val.src_port = spec->hdr.src_port;
                tcp.mask.dst_port = mask->hdr.dst_port;
                tcp.mask.src_port = mask->hdr.src_port;
                /* Remove unwanted bits from values. */
                tcp.val.src_port &= tcp.mask.src_port;
                tcp.val.dst_port &= tcp.mask.dst_port;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
                              const struct rte_flow_item *item,
                              uint64_t item_flags __rte_unused)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
        struct ibv_flow_spec_tcp_udp udp = {
                .type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_udp_mask;
        if (spec) {
                udp.val.dst_port = spec->hdr.dst_port;
                udp.val.src_port = spec->hdr.src_port;
                udp.mask.dst_port = mask->hdr.dst_port;
                udp.mask.src_port = mask->hdr.src_port;
                /* Remove unwanted bits from values. */
                udp.val.src_port &= udp.mask.src_port;
                udp.val.dst_port &= udp.mask.dst_port;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
                                const struct rte_flow_item *item,
                                uint64_t item_flags __rte_unused)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
        struct ibv_flow_spec_tunnel vxlan = {
                .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
                .size = size,
        };
        union vni {
                uint32_t vlan_id;
                uint8_t vni[4];
        } id = { .vlan_id = 0, };

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;
        if (spec) {
                memcpy(&id.vni[1], spec->vni, 3);
                vxlan.val.tunnel_id = id.vlan_id;
                memcpy(&id.vni[1], mask->vni, 3);
                vxlan.mask.tunnel_id = id.vlan_id;
                /* Remove unwanted bits from values. */
                vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
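
/*
 * The union above widens the 24-bit network-order VNI into the 32-bit
 * tunnel_id Verbs expects: copying the three VNI bytes into vni[1..3]
 * leaves the low byte clear, so e.g. VNI 0x123456 produces the in-memory
 * byte sequence 00 12 34 56 in id.vlan_id.
 */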

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
                                    const struct rte_flow_item *item,
                                    uint64_t item_flags __rte_unused)
{
        const struct rte_flow_item_vxlan_gpe *spec = item->spec;
        const struct rte_flow_item_vxlan_gpe *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
        struct ibv_flow_spec_tunnel vxlan_gpe = {
                .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
                .size = size,
        };
        union vni {
                uint32_t vlan_id;
                uint8_t vni[4];
        } id = { .vlan_id = 0, };

        if (!mask)
                mask = &rte_flow_item_vxlan_gpe_mask;
        if (spec) {
                memcpy(&id.vni[1], spec->vni, 3);
                vxlan_gpe.val.tunnel_id = id.vlan_id;
                memcpy(&id.vni[1], mask->vni, 3);
                vxlan_gpe.mask.tunnel_id = id.vlan_id;
                /* Remove unwanted bits from values. */
                vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
                                       enum ibv_flow_spec_type search,
                                       uint8_t protocol)
{
        unsigned int i;
        struct ibv_spec_header *hdr = (struct ibv_spec_header *)
                ((uint8_t *)attr + sizeof(struct ibv_flow_attr));

        if (!attr)
                return;
        for (i = 0; i != attr->num_of_specs; ++i) {
                if (hdr->type == search) {
                        union {
                                struct ibv_flow_spec_ipv4_ext *ipv4;
                                struct ibv_flow_spec_ipv6 *ipv6;
                        } ip;

                        switch (search) {
                        case IBV_FLOW_SPEC_IPV4_EXT:
                                ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
                                if (!ip.ipv4->val.proto) {
                                        ip.ipv4->val.proto = protocol;
                                        ip.ipv4->mask.proto = 0xff;
                                }
                                break;
                        case IBV_FLOW_SPEC_IPV6:
                                ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
                                if (!ip.ipv6->val.next_hdr) {
                                        ip.ipv6->val.next_hdr = protocol;
                                        ip.ipv6->mask.next_hdr = 0xff;
                                }
                                break;
                        default:
                                break;
                        }
                        break;
                }
                hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
        }
}
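
/*
 * Example: for the pattern ETH / IPV4 / GRE, an IPv4 spec with no explicit
 * next_proto_id match was already added, so the helper above rewrites that
 * spec to require protocol 47 (IPPROTO_GRE); an explicit user-supplied
 * protocol match is left untouched.
 */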

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
                              const struct rte_flow_item *item __rte_unused,
                              uint64_t item_flags)
{
        struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
        unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
        struct ibv_flow_spec_tunnel tunnel = {
                .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
                .size = size,
        };
#else
        const struct rte_flow_item_gre *spec = item->spec;
        const struct rte_flow_item_gre *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_gre);
        struct ibv_flow_spec_gre tunnel = {
                .type = IBV_FLOW_SPEC_GRE,
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_gre_mask;
        if (spec) {
                tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
                tunnel.val.protocol = spec->protocol;
                tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
                tunnel.mask.protocol = mask->protocol;
                /* Remove unwanted bits from values. */
                tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
                tunnel.val.protocol &= tunnel.mask.protocol;
                tunnel.val.key &= tunnel.mask.key;
        }
#endif
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                flow_verbs_item_gre_ip_protocol_update(verbs->attr,
                                                       IBV_FLOW_SPEC_IPV4_EXT,
                                                       IPPROTO_GRE);
        else
                flow_verbs_item_gre_ip_protocol_update(verbs->attr,
                                                       IBV_FLOW_SPEC_IPV6,
                                                       IPPROTO_GRE);
        flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
                               const struct rte_flow_item *item __rte_unused,
                               uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
        const struct rte_flow_item_mpls *spec = item->spec;
        const struct rte_flow_item_mpls *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_mpls);
        struct ibv_flow_spec_mpls mpls = {
                .type = IBV_FLOW_SPEC_MPLS,
                .size = size,
        };

        if (!mask)
                mask = &rte_flow_item_mpls_mask;
        if (spec) {
                memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
                memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
                /* Remove unwanted bits from values. */
                mpls.val.label &= mpls.mask.label;
        }
        flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
        (struct mlx5_flow *dev_flow,
         const struct rte_flow_action *action __rte_unused)
{
        unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
        struct ibv_flow_spec_action_drop drop = {
                        .type = IBV_FLOW_SPEC_ACTION_DROP,
                        .size = size,
        };

        flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
                                  const struct rte_flow_action *action)
{
        const struct rte_flow_action_queue *queue = action->conf;
        struct rte_flow *flow = dev_flow->flow;

        if (flow->queue)
                (*flow->queue)[0] = queue->index;
        flow->rss.queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
                                const struct rte_flow_action *action)
{
        const struct rte_flow_action_rss *rss = action->conf;
        const uint8_t *rss_key;
        struct rte_flow *flow = dev_flow->flow;

        if (flow->queue)
                memcpy((*flow->queue), rss->queue,
                       rss->queue_num * sizeof(uint16_t));
        flow->rss.queue_num = rss->queue_num;
        /* NULL RSS key indicates default RSS key. */
        rss_key = !rss->key ? rss_hash_default_key : rss->key;
        memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
        /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
        flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
        flow->rss.level = rss->level;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
        (struct mlx5_flow *dev_flow,
         const struct rte_flow_action *action __rte_unused)
{
        unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
        struct ibv_flow_spec_action_tag tag = {
                .type = IBV_FLOW_SPEC_ACTION_TAG,
                .size = size,
                .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
        };

        flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
                                 const struct rte_flow_action *action)
{
        const struct rte_flow_action_mark *mark = action->conf;
        unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
        struct ibv_flow_spec_action_tag tag = {
                .type = IBV_FLOW_SPEC_ACTION_TAG,
                .size = size,
                .tag_id = mlx5_flow_mark_set(mark->id),
        };

        flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 On success else a negative errno value is returned and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
                                  const struct rte_flow_action *action,
                                  struct rte_eth_dev *dev,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_action_count *count = action->conf;
        struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
        defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
        struct ibv_flow_spec_counter_action counter = {
                .type = IBV_FLOW_SPEC_ACTION_COUNT,
                .size = size,
        };
#endif

        if (!flow->counter) {
                flow->counter = flow_verbs_counter_new(dev, count->shared,
                                                       count->id);
                if (!flow->counter)
                        return rte_flow_error_set(error, rte_errno,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  action,
                                                  "cannot get counter"
                                                  " context.");
        }
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
        counter.counter_set_handle = flow->counter->cs->handle;
        flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        counter.counters = flow->counter->cs;
        flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
        return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item items[],
                    const struct rte_flow_action actions[],
                    struct rte_flow_error *error)
{
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
        uint64_t last_item = 0;
        uint8_t next_protocol = 0xff;

        if (items == NULL)
                return -1;
        ret = mlx5_flow_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                int ret = 0;

                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                             MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
                                              MLX5_FLOW_LAYER_INNER_VLAN) :
                                             (MLX5_FLOW_LAYER_OUTER_L2 |
                                              MLX5_FLOW_LAYER_OUTER_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->mask))->hdr.next_proto_id;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->mask)->hdr.proto;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags,
                                                                dev, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(dev, items,
                                                           item_flags,
                                                           last_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
                item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}

/**
 * Calculate the required bytes that are needed for the action part of the verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
        int size = 0;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        size += sizeof(struct ibv_flow_spec_action_tag);
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        size += sizeof(struct ibv_flow_spec_action_tag);
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        size += sizeof(struct ibv_flow_spec_action_drop);
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
        defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
                        size += sizeof(struct ibv_flow_spec_counter_action);
#endif
                        break;
                default:
                        break;
                }
        }
        return size;
}

/**
 * Calculate the required bytes that are needed for the item part of the verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
        int size = 0;

        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        size += sizeof(struct ibv_flow_spec_eth);
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        size += sizeof(struct ibv_flow_spec_eth);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        size += sizeof(struct ibv_flow_spec_ipv4_ext);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        size += sizeof(struct ibv_flow_spec_ipv6);
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        size += sizeof(struct ibv_flow_spec_tcp_udp);
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        size += sizeof(struct ibv_flow_spec_tcp_udp);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        size += sizeof(struct ibv_flow_spec_tunnel);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        size += sizeof(struct ibv_flow_spec_tunnel);
                        break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
                case RTE_FLOW_ITEM_TYPE_GRE:
                        size += sizeof(struct ibv_flow_spec_gre);
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        size += sizeof(struct ibv_flow_spec_mpls);
                        break;
#else
                case RTE_FLOW_ITEM_TYPE_GRE:
                        size += sizeof(struct ibv_flow_spec_tunnel);
                        break;
#endif
                default:
                        break;
                }
        }
        return size;
}
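
/*
 * Note that QUEUE and RSS contribute no bytes above: they are realized
 * through the hash Rx queue the flow is attached to in flow_verbs_apply(),
 * not through a Verbs spec in the pattern buffer.
 */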

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
                   const struct rte_flow_item items[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
        struct mlx5_flow *flow;

        size += flow_verbs_get_actions_size(actions);
        size += flow_verbs_get_items_size(items);
        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->verbs.attr = (void *)(flow + 1);
        flow->verbs.specs =
                (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
        return flow;
}
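
/*
 * The single allocation made above is carved up as follows:
 *
 *   flow ----------------> struct mlx5_flow
 *   flow->verbs.attr ----> struct ibv_flow_attr (right after mlx5_flow)
 *   flow->verbs.specs ---> spec area filled later by flow_verbs_spec_add()
 */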

/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
                     struct mlx5_flow *dev_flow,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item items[],
                     const struct rte_flow_action actions[],
                     struct rte_flow_error *error)
{
        struct rte_flow *flow = dev_flow->flow;
        uint64_t item_flags = 0;
        uint64_t action_flags = 0;
        uint64_t priority = attr->priority;
        uint32_t subpriority = 0;
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int ret;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        flow_verbs_translate_action_flag(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        flow_verbs_translate_action_mark(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow_verbs_translate_action_drop(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        flow_verbs_translate_action_queue(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        flow_verbs_translate_action_rss(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_verbs_translate_action_count(dev_flow,
                                                                actions,
                                                                dev, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        flow->actions = action_flags;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        flow_verbs_translate_item_eth(dev_flow, items,
                                                      item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        flow_verbs_translate_item_vlan(dev_flow, items,
                                                       item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
                                                MLX5_FLOW_LAYER_INNER_VLAN) :
                                               (MLX5_FLOW_LAYER_OUTER_L2 |
                                                MLX5_FLOW_LAYER_OUTER_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        flow_verbs_translate_item_ipv4(dev_flow, items,
                                                       item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L3;
                        dev_flow->verbs.hash_fields |=
                                mlx5_flow_hashfields_adjust
                                        (dev_flow, tunnel,
                                         MLX5_IPV4_LAYER_TYPES,
                                         MLX5_IPV4_IBV_RX_HASH);
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        flow_verbs_translate_item_ipv6(dev_flow, items,
                                                       item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L3;
                        dev_flow->verbs.hash_fields |=
                                mlx5_flow_hashfields_adjust
                                        (dev_flow, tunnel,
                                         MLX5_IPV6_LAYER_TYPES,
                                         MLX5_IPV6_IBV_RX_HASH);
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        flow_verbs_translate_item_tcp(dev_flow, items,
                                                      item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L4;
                        dev_flow->verbs.hash_fields |=
                                mlx5_flow_hashfields_adjust
                                        (dev_flow, tunnel, ETH_RSS_TCP,
                                         (IBV_RX_HASH_SRC_PORT_TCP |
                                          IBV_RX_HASH_DST_PORT_TCP));
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        flow_verbs_translate_item_udp(dev_flow, items,
                                                      item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L4;
                        dev_flow->verbs.hash_fields |=
                                mlx5_flow_hashfields_adjust
                                        (dev_flow, tunnel, ETH_RSS_UDP,
                                         (IBV_RX_HASH_SRC_PORT_UDP |
                                          IBV_RX_HASH_DST_PORT_UDP));
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        flow_verbs_translate_item_vxlan(dev_flow, items,
                                                        item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
                                                            item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        flow_verbs_translate_item_gre(dev_flow, items,
                                                      item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        flow_verbs_translate_item_mpls(dev_flow, items,
                                                       item_flags);
                        subpriority = MLX5_PRIORITY_MAP_L2;
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL,
                                                  "item not supported");
                }
        }
        dev_flow->layers = item_flags;
        dev_flow->verbs.attr->priority =
                mlx5_flow_adjust_priority(dev, priority, subpriority);
        dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
        return 0;
}
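
/*
 * The subpriority computed above tracks the deepest layer matched (L2, L3,
 * L4), so that more specific flows can be placed ahead of less specific
 * ones; mlx5_flow_adjust_priority() folds it into the final Verbs priority.
 */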

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_verbs *verbs;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                verbs = &dev_flow->verbs;
                if (verbs->flow) {
                        claim_zero(mlx5_glue->destroy_flow(verbs->flow));
                        verbs->flow = NULL;
                }
                if (verbs->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, verbs->hrxq);
                        verbs->hrxq = NULL;
                }
        }
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_verbs_remove(dev, flow);
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                rte_free(dev_flow);
        }
        if (flow->counter) {
                flow_verbs_counter_release(dev, flow->counter);
                flow->counter = NULL;
        }
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct mlx5_flow_verbs *verbs;
        struct mlx5_flow *dev_flow;
        int err;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                verbs = &dev_flow->verbs;
                if (flow->actions & MLX5_FLOW_ACTION_DROP) {
                        verbs->hrxq = mlx5_hrxq_drop_new(dev);
                        if (!verbs->hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get drop hash queue");
                                goto error;
                        }
                } else {
                        struct mlx5_hrxq *hrxq;

                        hrxq = mlx5_hrxq_get(dev, flow->key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             verbs->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
                        if (!hrxq)
                                hrxq = mlx5_hrxq_new(dev, flow->key,
                                                     MLX5_RSS_HASH_KEY_LEN,
                                                     verbs->hash_fields,
                                                     (*flow->queue),
                                                     flow->rss.queue_num,
                                                     !!(dev_flow->layers &
                                                        MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        verbs->hrxq = hrxq;
                }
                verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
                                                     verbs->attr);
                if (!verbs->flow) {
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                verbs = &dev_flow->verbs;
                if (verbs->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, verbs->hrxq);
                        verbs->hrxq = NULL;
                }
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 const struct rte_flow_action *actions,
                 void *data,
                 struct rte_flow_error *error)
{
        int ret = -EINVAL;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_verbs_counter_query(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        return ret;
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
        .validate = flow_verbs_validate,
        .prepare = flow_verbs_prepare,
        .translate = flow_verbs_translate,
        .apply = flow_verbs_apply,
        .remove = flow_verbs_remove,
        .destroy = flow_verbs_destroy,
        .query = flow_verbs_query,
};
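
/*
 * Usage sketch (illustrative only): the generic mlx5 flow layer in
 * mlx5_flow.c selects this ops table when the Verbs engine is in use and
 * drives a flow rule roughly as:
 *
 *   ops->validate(dev, attr, items, actions, error);
 *   dev_flow = ops->prepare(attr, items, actions, error);
 *   ops->translate(dev, dev_flow, attr, items, actions, error);
 *   ops->apply(dev, flow, error);   // ibv_create_flow() under the hood
 *   ...
 *   ops->remove(dev, flow);         // on device stop, keeps the object
 *   ops->destroy(dev, flow);        // full teardown
 *
 * The exact call sites live in the generic layer; the sequence above is
 * only meant to show how the callbacks fit together.
 */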