1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
5 #include <netinet/in.h>
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
40 * Pointer to the Ethernet device structure.
42 * Indicate if this counter is shared with other flows.
47 * A pointer to the counter, NULL otherwise and rte_errno is set.
/* Get (reuse) or create a flow counter for the given port.
 * Shared counters are looked up by @shared/@id on the per-port list before a
 * new one is allocated.  NOTE(review): several lines (loop body, error paths,
 * the return statement) are elided from this listing — do not infer the full
 * control flow from what is visible here. */
49 static struct mlx5_flow_counter *
50 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
52 struct priv *priv = dev->data->dev_private;
53 struct mlx5_flow_counter *cnt;
/* Scan counters already attached to this port; non-matching entries are
 * skipped (the reuse branch is elided from this view). */
55 LIST_FOREACH(cnt, &priv->flow_counters, next) {
56 if (!cnt->shared || cnt->shared != shared)
/* ibv counter sets exist only when rdma-core exposes the feature. */
63 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
65 struct mlx5_flow_counter tmpl = {
68 .cs = mlx5_glue->create_counter_set
70 &(struct ibv_counter_set_init_attr){
/* Allocate a zeroed counter object and publish it on the port list. */
81 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
87 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
95 * Release a flow counter.
98 * Pointer to the counter handler.
/* Drop one reference on @counter; on the last reference destroy the
 * underlying Verbs counter set and unlink the counter from the port list. */
101 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
103 if (--counter->ref_cnt == 0) {
104 claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
105 LIST_REMOVE(counter, next);
111 * Add a verbs item specification into @p flow.
113 * @param[in, out] flow
114 * Pointer to flow structure.
116 * Create specification.
118 * Size in bytes of the specification to copy.
/* Append @size bytes of a Verbs spec (@src) at the current write offset of
 * the flow's spec buffer and bump the attribute's spec count.  The caller
 * guarantees there is room.  NOTE(review): the update of verbs->size is
 * elided from this listing. */
121 flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
123 struct mlx5_flow_verbs *verbs = &flow->verbs;
128 dst = (void *)(verbs->specs + verbs->size);
129 memcpy(dst, src, size);
130 ++verbs->attr->num_of_specs;
136 * Convert the @p item into a Verbs specification. This function assumes that
137 * the input is valid and that there is space to insert the requested item
141 * Item specification.
142 * @param[in] item_flags
143 * Bit field with all detected items.
144 * @param[in, out] dev_flow
145 * Pointer to dev_flow structure.
/* Translate an rte_flow ETH item into a Verbs ibv_flow_spec_eth and append
 * it to @dev_flow.  Fix: restore "&eth" where HTML-entity corruption had
 * turned it into the mojibake character "ð". */
148 flow_verbs_translate_item_eth(const struct rte_flow_item *item,
149 uint64_t *item_flags,
150 struct mlx5_flow *dev_flow)
152 const struct rte_flow_item_eth *spec = item->spec;
153 const struct rte_flow_item_eth *mask = item->mask;
/* Inner spec when any tunnel layer has already been detected. */
154 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
155 const unsigned int size = sizeof(struct ibv_flow_spec_eth);
156 struct ibv_flow_spec_eth eth = {
157 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Default mask when the item carries none. */
162 mask = &rte_flow_item_eth_mask;
166 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
167 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
168 eth.val.ether_type = spec->type;
169 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
170 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
171 eth.mask.ether_type = mask->type;
172 /* Remove unwanted bits from values. */
173 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
174 eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
175 eth.val.src_mac[i] &= eth.mask.src_mac[i];
177 eth.val.ether_type &= eth.mask.ether_type;
178 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
180 flow_verbs_spec_add(dev_flow, &eth, size);
181 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
182 MLX5_FLOW_LAYER_OUTER_L2;
186 * Update the VLAN tag in the Verbs Ethernet specification.
187 * This function assumes that the input is valid and there is space to add
188 * the requested item.
190 * @param[in, out] attr
191 * Pointer to Verbs attributes structure.
193 * Verbs structure containing the VLAN information to copy.
/* Walk the spec list embedded after @attr and, when an ETH spec of the same
 * type (inner/outer) as @eth is found, copy the VLAN tag and ether_type
 * fields into it — used to merge a VLAN item into an already-emitted ETH
 * spec. */
196 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
197 struct ibv_flow_spec_eth *eth)
200 const enum ibv_flow_spec_type search = eth->type;
/* Specs are laid out contiguously right after the attribute header. */
201 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
202 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
204 for (i = 0; i != attr->num_of_specs; ++i) {
205 if (hdr->type == search) {
206 struct ibv_flow_spec_eth *e =
207 (struct ibv_flow_spec_eth *)hdr;
209 e->val.vlan_tag = eth->val.vlan_tag;
210 e->mask.vlan_tag = eth->mask.vlan_tag;
211 e->val.ether_type = eth->val.ether_type;
212 e->mask.ether_type = eth->mask.ether_type;
/* Advance by each spec's self-declared size. */
215 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
220 * Convert the @p item into a Verbs specification. This function assumes that
221 * the input is valid and that there is space to insert the requested item
225 * Item specification.
226 * @param[in, out] item_flags
227 * Bit mask that holds all detected items.
228 * @param[in, out] dev_flow
229 * Pointer to dev_flow structure.
/* Translate an rte_flow VLAN item.  If no L2 (ETH) spec was emitted yet, a
 * fresh ETH spec carrying the VLAN tag is appended; otherwise the existing
 * ETH spec is updated in place.  Fix: restore "&eth" where HTML-entity
 * corruption had turned it into the mojibake character "ð". */
232 flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
233 uint64_t *item_flags,
234 struct mlx5_flow *dev_flow)
236 const struct rte_flow_item_vlan *spec = item->spec;
237 const struct rte_flow_item_vlan *mask = item->mask;
238 unsigned int size = sizeof(struct ibv_flow_spec_eth);
239 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
240 struct ibv_flow_spec_eth eth = {
241 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Which L2 layer flag (inner/outer) marks "ETH already emitted". */
244 const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
245 MLX5_FLOW_LAYER_OUTER_L2;
248 mask = &rte_flow_item_vlan_mask;
250 eth.val.vlan_tag = spec->tci;
251 eth.mask.vlan_tag = mask->tci;
252 eth.val.vlan_tag &= eth.mask.vlan_tag;
253 eth.val.ether_type = spec->inner_type;
254 eth.mask.ether_type = mask->inner_type;
255 eth.val.ether_type &= eth.mask.ether_type;
257 if (!(*item_flags & l2m)) {
258 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
259 flow_verbs_spec_add(dev_flow, &eth, size);
261 flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
262 size = 0; /* Only an update is done in eth specification. */
264 *item_flags |= tunnel ?
265 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
266 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
270 * Convert the @p item into a Verbs specification. This function assumes that
271 * the input is valid and that there is space to insert the requested item
275 * Item specification.
276 * @param[in, out] item_flags
277 * Bit mask that marks all detected items.
278 * @param[in, out] dev_flow
279 * Pointer to specific flow structure.
/* Translate an rte_flow IPv4 item into a Verbs ibv_flow_spec_ipv4_ext,
 * mask it, adjust the RSS hash fields and append the spec to @dev_flow. */
282 flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
283 uint64_t *item_flags,
284 struct mlx5_flow *dev_flow)
286 const struct rte_flow_item_ipv4 *spec = item->spec;
287 const struct rte_flow_item_ipv4 *mask = item->mask;
288 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
289 unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
290 struct ibv_flow_spec_ipv4_ext ipv4 = {
291 .type = IBV_FLOW_SPEC_IPV4_EXT |
292 (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Default mask when the item carries none. */
297 mask = &rte_flow_item_ipv4_mask;
298 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
299 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
301 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
302 .src_ip = spec->hdr.src_addr,
303 .dst_ip = spec->hdr.dst_addr,
304 .proto = spec->hdr.next_proto_id,
305 .tos = spec->hdr.type_of_service,
307 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
308 .src_ip = mask->hdr.src_addr,
309 .dst_ip = mask->hdr.dst_addr,
310 .proto = mask->hdr.next_proto_id,
311 .tos = mask->hdr.type_of_service,
313 /* Remove unwanted bits from values. */
314 ipv4.val.src_ip &= ipv4.mask.src_ip;
315 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
316 ipv4.val.proto &= ipv4.mask.proto;
317 ipv4.val.tos &= ipv4.mask.tos;
/* Fold the IPv4 contribution into the flow's RSS hash selection. */
319 dev_flow->verbs.hash_fields |=
320 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
321 MLX5_IPV4_LAYER_TYPES,
322 MLX5_IPV4_IBV_RX_HASH);
323 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
324 flow_verbs_spec_add(dev_flow, &ipv4, size);
328 * Convert the @p item into a Verbs specification. This function assumes that
329 * the input is valid and that there is space to insert the requested item
333 * Item specification.
334 * @param[in, out] item_flags
335 * Bit mask that marks all detected items.
336 * @param[in, out] dev_flow
337 * Pointer to specific flow structure.
/* Translate an rte_flow IPv6 item into a Verbs ibv_flow_spec_ipv6 (addresses,
 * flow label, traffic class, next header, hop limit), mask it, adjust the RSS
 * hash fields and append the spec to @dev_flow. */
340 flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
341 uint64_t *item_flags,
342 struct mlx5_flow *dev_flow)
344 const struct rte_flow_item_ipv6 *spec = item->spec;
345 const struct rte_flow_item_ipv6 *mask = item->mask;
/* NOTE(review): tunnel is derived from dev_flow->flow->layers here, while
 * sibling translators use *item_flags — confirm this difference is intended. */
346 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
347 unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
348 struct ibv_flow_spec_ipv6 ipv6 = {
349 .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
354 mask = &rte_flow_item_ipv6_mask;
355 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
356 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
359 uint32_t vtc_flow_val;
360 uint32_t vtc_flow_mask;
362 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
363 RTE_DIM(ipv6.val.src_ip));
364 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
365 RTE_DIM(ipv6.val.dst_ip));
366 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
367 RTE_DIM(ipv6.mask.src_ip));
368 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
369 RTE_DIM(ipv6.mask.dst_ip));
/* vtc_flow packs version/TC/flow-label; split it in CPU byte order. */
370 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
371 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
372 ipv6.val.flow_label =
373 rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
375 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
377 ipv6.val.next_hdr = spec->hdr.proto;
378 ipv6.val.hop_limit = spec->hdr.hop_limits;
379 ipv6.mask.flow_label =
380 rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
382 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
384 ipv6.mask.next_hdr = mask->hdr.proto;
385 ipv6.mask.hop_limit = mask->hdr.hop_limits;
386 /* Remove unwanted bits from values. */
387 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
388 ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
389 ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
391 ipv6.val.flow_label &= ipv6.mask.flow_label;
392 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
393 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
394 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
396 dev_flow->verbs.hash_fields |=
397 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
398 MLX5_IPV6_LAYER_TYPES,
399 MLX5_IPV6_IBV_RX_HASH);
400 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
401 flow_verbs_spec_add(dev_flow, &ipv6, size);
405 * Convert the @p item into a Verbs specification. This function assumes that
406 * the input is valid and that there is space to insert the requested item
410 * Item specification.
411 * @param[in, out] item_flags
412 * Bit mask that marks all detected items.
413 * @param[in, out] dev_flow
414 * Pointer to specific flow structure.
/* Translate an rte_flow UDP item into a Verbs ibv_flow_spec_tcp_udp (ports
 * only), mask it, adjust the RSS hash fields and append it to @dev_flow. */
417 flow_verbs_translate_item_udp(const struct rte_flow_item *item,
418 uint64_t *item_flags,
419 struct mlx5_flow *dev_flow)
421 const struct rte_flow_item_udp *spec = item->spec;
422 const struct rte_flow_item_udp *mask = item->mask;
423 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
424 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
425 struct ibv_flow_spec_tcp_udp udp = {
426 .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
431 mask = &rte_flow_item_udp_mask;
432 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
433 MLX5_FLOW_LAYER_OUTER_L4_UDP;
435 udp.val.dst_port = spec->hdr.dst_port;
436 udp.val.src_port = spec->hdr.src_port;
437 udp.mask.dst_port = mask->hdr.dst_port;
438 udp.mask.src_port = mask->hdr.src_port;
439 /* Remove unwanted bits from values. */
440 udp.val.src_port &= udp.mask.src_port;
441 udp.val.dst_port &= udp.mask.dst_port;
443 dev_flow->verbs.hash_fields |=
444 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_UDP,
445 (IBV_RX_HASH_SRC_PORT_UDP |
446 IBV_RX_HASH_DST_PORT_UDP));
447 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
448 flow_verbs_spec_add(dev_flow, &udp, size);
452 * Convert the @p item into a Verbs specification. This function assumes that
453 * the input is valid and that there is space to insert the requested item
457 * Item specification.
458 * @param[in, out] item_flags
459 * Bit mask that marks all detected items.
460 * @param[in, out] dev_flow
461 * Pointer to specific flow structure.
/* Translate an rte_flow TCP item into a Verbs ibv_flow_spec_tcp_udp (ports
 * only), mask it, adjust the RSS hash fields and append it to @dev_flow. */
464 flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
465 uint64_t *item_flags,
466 struct mlx5_flow *dev_flow)
468 const struct rte_flow_item_tcp *spec = item->spec;
469 const struct rte_flow_item_tcp *mask = item->mask;
/* NOTE(review): tunnel is derived from dev_flow->flow->layers here, while
 * sibling translators use *item_flags — confirm this difference is intended. */
470 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
471 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
472 struct ibv_flow_spec_tcp_udp tcp = {
473 .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
478 mask = &rte_flow_item_tcp_mask;
479 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
480 MLX5_FLOW_LAYER_OUTER_L4_TCP;
482 tcp.val.dst_port = spec->hdr.dst_port;
483 tcp.val.src_port = spec->hdr.src_port;
484 tcp.mask.dst_port = mask->hdr.dst_port;
485 tcp.mask.src_port = mask->hdr.src_port;
486 /* Remove unwanted bits from values. */
487 tcp.val.src_port &= tcp.mask.src_port;
488 tcp.val.dst_port &= tcp.mask.dst_port;
490 dev_flow->verbs.hash_fields |=
491 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_TCP,
492 (IBV_RX_HASH_SRC_PORT_TCP |
493 IBV_RX_HASH_DST_PORT_TCP));
494 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
495 flow_verbs_spec_add(dev_flow, &tcp, size);
499 * Convert the @p item into a Verbs specification. This function assumes that
500 * the input is valid and that there is space to insert the requested item
504 * Item specification.
505 * @param[in, out] item_flags
506 * Bit mask that marks all detected items.
507 * @param[in, out] dev_flow
508 * Pointer to specific flow structure.
/* Translate an rte_flow VXLAN item into a Verbs tunnel spec.  The 24-bit VNI
 * is copied into bytes [1..3] of a 32-bit union field (named "vlan_id" but
 * used here as the tunnel id) to build tunnel_id. */
511 flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
512 uint64_t *item_flags,
513 struct mlx5_flow *dev_flow)
515 const struct rte_flow_item_vxlan *spec = item->spec;
516 const struct rte_flow_item_vxlan *mask = item->mask;
517 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
518 struct ibv_flow_spec_tunnel vxlan = {
519 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
525 } id = { .vlan_id = 0, };
528 mask = &rte_flow_item_vxlan_mask;
/* VNI occupies the low 3 bytes; byte 0 stays zero. */
530 memcpy(&id.vni[1], spec->vni, 3);
531 vxlan.val.tunnel_id = id.vlan_id;
532 memcpy(&id.vni[1], mask->vni, 3);
533 vxlan.mask.tunnel_id = id.vlan_id;
534 /* Remove unwanted bits from values. */
535 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
537 flow_verbs_spec_add(dev_flow, &vxlan, size);
538 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
539 *item_flags |= MLX5_FLOW_LAYER_VXLAN;
543 * Convert the @p item into a Verbs specification. This function assumes that
544 * the input is valid and that there is space to insert the requested item
548 * Item specification.
549 * @param[in, out] item_flags
550 * Bit mask that marks all detected items.
551 * @param[in, out] dev_flow
552 * Pointer to specific flow structure.
/* Translate an rte_flow VXLAN-GPE item into a Verbs tunnel spec — same VNI
 * packing scheme as the plain VXLAN translator above. */
555 flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
556 uint64_t *item_flags,
557 struct mlx5_flow *dev_flow)
559 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
560 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
561 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
562 struct ibv_flow_spec_tunnel vxlan_gpe = {
563 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
569 } id = { .vlan_id = 0, };
572 mask = &rte_flow_item_vxlan_gpe_mask;
/* VNI occupies the low 3 bytes; byte 0 stays zero. */
574 memcpy(&id.vni[1], spec->vni, 3);
575 vxlan_gpe.val.tunnel_id = id.vlan_id;
576 memcpy(&id.vni[1], mask->vni, 3);
577 vxlan_gpe.mask.tunnel_id = id.vlan_id;
578 /* Remove unwanted bits from values. */
579 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
581 flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
582 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
583 *item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
587 * Update the protocol in Verbs IPv4/IPv6 spec.
589 * @param[in, out] attr
590 * Pointer to Verbs attributes structure.
592 * Specification type to search in order to update the IP protocol.
593 * @param[in] protocol
594 * Protocol value to set if none is present in the specification.
/* Walk the spec list after @attr looking for the IPv4/IPv6 spec identified by
 * @search; if that spec did not match on the IP protocol yet, pin it to
 * @protocol (mask 0xff) so only GRE-carrying packets match. */
597 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
598 enum ibv_flow_spec_type search,
/* Specs are laid out contiguously right after the attribute header. */
602 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
603 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
607 for (i = 0; i != attr->num_of_specs; ++i) {
608 if (hdr->type == search) {
610 struct ibv_flow_spec_ipv4_ext *ipv4;
611 struct ibv_flow_spec_ipv6 *ipv6;
615 case IBV_FLOW_SPEC_IPV4_EXT:
616 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
/* Only set the protocol if the user left it unspecified. */
617 if (!ip.ipv4->val.proto) {
618 ip.ipv4->val.proto = protocol;
619 ip.ipv4->mask.proto = 0xff;
622 case IBV_FLOW_SPEC_IPV6:
623 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
624 if (!ip.ipv6->val.next_hdr) {
625 ip.ipv6->val.next_hdr = protocol;
626 ip.ipv6->mask.next_hdr = 0xff;
634 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
639 * Convert the @p item into a Verbs specification. This function assumes that
640 * the input is valid and that there is space to insert the requested item
644 * Item specification.
645 * @param[in, out] item_flags
646 * Bit mask that marks all detected items.
647 * @param[in, out] dev_flow
648 * Pointer to specific flow structure.
/* Translate an rte_flow GRE item.  Without MPLS support in rdma-core it
 * degrades to a generic tunnel spec (key/protocol ignored); with it, a full
 * GRE spec is built and the outer L3 spec is pinned to IPPROTO_GRE. */
651 flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
652 uint64_t *item_flags,
653 struct mlx5_flow *dev_flow)
655 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
656 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
657 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
658 struct ibv_flow_spec_tunnel tunnel = {
659 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
663 const struct rte_flow_item_gre *spec = item->spec;
664 const struct rte_flow_item_gre *mask = item->mask;
665 unsigned int size = sizeof(struct ibv_flow_spec_gre);
666 struct ibv_flow_spec_gre tunnel = {
667 .type = IBV_FLOW_SPEC_GRE,
672 mask = &rte_flow_item_gre_mask;
674 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
675 tunnel.val.protocol = spec->protocol;
676 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
677 tunnel.mask.protocol = mask->protocol;
678 /* Remove unwanted bits from values. */
679 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
680 tunnel.val.protocol &= tunnel.mask.protocol;
681 tunnel.val.key &= tunnel.mask.key;
/* Force the already-emitted outer L3 spec to match only GRE traffic. */
684 if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
685 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
686 IBV_FLOW_SPEC_IPV4_EXT,
689 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
692 flow_verbs_spec_add(dev_flow, &tunnel, size);
693 verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
694 *item_flags |= MLX5_FLOW_LAYER_GRE;
698 * Convert the @p action into a Verbs specification. This function assumes that
699 * the input is valid and that there is space to insert the requested action
700 * into the flow. This function also return the action that was added.
703 * Item specification.
704 * @param[in, out] item_flags
705 * Bit mask that marks all detected items.
706 * @param[in, out] dev_flow
707 * Pointer to specific flow structure.
/* Translate an rte_flow MPLS item into a Verbs MPLS spec (no-op unless
 * rdma-core exposes MPLS support).  NOTE(review): the second parameter is
 * named "action_flags" but receives a layer flag (MLX5_FLOW_LAYER_MPLS) —
 * it carries item flags; consider renaming upstream. */
710 flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
711 uint64_t *action_flags __rte_unused,
712 struct mlx5_flow *dev_flow __rte_unused)
714 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
715 const struct rte_flow_item_mpls *spec = item->spec;
716 const struct rte_flow_item_mpls *mask = item->mask;
717 unsigned int size = sizeof(struct ibv_flow_spec_mpls);
718 struct ibv_flow_spec_mpls mpls = {
719 .type = IBV_FLOW_SPEC_MPLS,
724 mask = &rte_flow_item_mpls_mask;
726 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
727 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
728 /* Remove unwanted bits from values. */
729 mpls.val.label &= mpls.mask.label;
731 flow_verbs_spec_add(dev_flow, &mpls, size);
732 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
733 *action_flags |= MLX5_FLOW_LAYER_MPLS;
738 * Convert the @p action into a Verbs specification. This function assumes that
739 * the input is valid and that there is space to insert the requested action
740 * into the flow. This function also return the action that was added.
742 * @param[in, out] action_flags
743 * Pointer to the detected actions.
744 * @param[in] dev_flow
745 * Pointer to mlx5_flow.
/* Append a Verbs DROP action spec to @dev_flow and record the action flag. */
748 flow_verbs_translate_action_drop(uint64_t *action_flags,
749 struct mlx5_flow *dev_flow)
751 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
752 struct ibv_flow_spec_action_drop drop = {
753 .type = IBV_FLOW_SPEC_ACTION_DROP,
757 flow_verbs_spec_add(dev_flow, &drop, size);
758 *action_flags |= MLX5_FLOW_ACTION_DROP;
762 * Convert the @p action into a Verbs specification. This function assumes that
763 * the input is valid and that there is space to insert the requested action
764 * into the flow. This function also return the action that was added.
767 * Action configuration.
768 * @param[in, out] action_flags
769 * Pointer to the detected actions.
770 * @param[in] dev_flow
771 * Pointer to mlx5_flow.
/* Record a QUEUE action: steer the flow to a single Rx queue by storing the
 * queue index in the flow's queue array (no Verbs spec is emitted here). */
774 flow_verbs_translate_action_queue(const struct rte_flow_action *action,
775 uint64_t *action_flags,
776 struct mlx5_flow *dev_flow)
778 const struct rte_flow_action_queue *queue = action->conf;
779 struct rte_flow *flow = dev_flow->flow;
782 (*flow->queue)[0] = queue->index;
783 flow->rss.queue_num = 1;
784 *action_flags |= MLX5_FLOW_ACTION_QUEUE;
788 * Convert the @p action into a Verbs specification. This function assumes that
789 * the input is valid and that there is space to insert the requested action
790 * into the flow. This function also return the action that was added.
793 * Action configuration.
794 * @param[in, out] action_flags
795 * Pointer to the detected actions.
796 * @param[in] dev_flow
797 * Pointer to mlx5_flow.
/* Record an RSS action: copy the queue list, hash key, hash types and tunnel
 * level into the flow (no Verbs spec is emitted here).  NOTE(review): the
 * copy assumes the caller validated rss->queue_num against the array size
 * and rss->key length against MLX5_RSS_HASH_KEY_LEN — confirm in validate. */
800 flow_verbs_translate_action_rss(const struct rte_flow_action *action,
801 uint64_t *action_flags,
802 struct mlx5_flow *dev_flow)
804 const struct rte_flow_action_rss *rss = action->conf;
805 struct rte_flow *flow = dev_flow->flow;
808 memcpy((*flow->queue), rss->queue,
809 rss->queue_num * sizeof(uint16_t));
810 flow->rss.queue_num = rss->queue_num;
811 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
812 flow->rss.types = rss->types;
813 flow->rss.level = rss->level;
814 *action_flags |= MLX5_FLOW_ACTION_RSS;
818 * Convert the @p action into a Verbs specification. This function assumes that
819 * the input is valid and that there is space to insert the requested action
820 * into the flow. This function also return the action that was added.
823 * Action configuration.
824 * @param[in, out] action_flags
825 * Pointer to the detected actions.
826 * @param[in] dev_flow
827 * Pointer to mlx5_flow.
/* Append a Verbs TAG action carrying the default mark value — implements the
 * rte_flow FLAG action as a mark with MLX5_FLOW_MARK_DEFAULT. */
830 flow_verbs_translate_action_flag
831 (const struct rte_flow_action *action __rte_unused,
832 uint64_t *action_flags,
833 struct mlx5_flow *dev_flow)
835 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
836 struct ibv_flow_spec_action_tag tag = {
837 .type = IBV_FLOW_SPEC_ACTION_TAG,
839 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
841 *action_flags |= MLX5_FLOW_ACTION_MARK;
842 flow_verbs_spec_add(dev_flow, &tag, size);
846 * Update verbs specification to modify the flag to mark.
848 * @param[in, out] verbs
849 * Pointer to the mlx5_flow_verbs structure.
851 * Mark identifier to replace the flag.
/* Rewrite an already-emitted TAG spec so its tag id carries @mark_id —
 * used when a MARK action follows a FLAG action on the same flow. */
854 flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
856 struct ibv_spec_header *hdr;
861 /* Update Verbs specification. */
862 hdr = (struct ibv_spec_header *)verbs->specs;
865 for (i = 0; i != verbs->attr->num_of_specs; ++i) {
866 if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
867 struct ibv_flow_spec_action_tag *t =
868 (struct ibv_flow_spec_action_tag *)hdr;
870 t->tag_id = mlx5_flow_mark_set(mark_id);
/* Advance by each spec's self-declared size. */
872 hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
877 * Convert the @p action into a Verbs specification. This function assumes that
878 * the input is valid and that there is space to insert the requested action
879 * into the flow. This function also return the action that was added.
882 * Action configuration.
883 * @param[in, out] action_flags
884 * Pointer to the detected actions.
885 * @param[in] dev_flow
886 * Pointer to mlx5_flow.
/* Translate a MARK action.  If a FLAG action already emitted a TAG spec,
 * update it in place; otherwise append a fresh TAG spec with the user mark. */
889 flow_verbs_translate_action_mark(const struct rte_flow_action *action,
890 uint64_t *action_flags,
891 struct mlx5_flow *dev_flow)
893 const struct rte_flow_action_mark *mark = action->conf;
894 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
895 struct ibv_flow_spec_action_tag tag = {
896 .type = IBV_FLOW_SPEC_ACTION_TAG,
899 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
901 if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
902 flow_verbs_mark_update(verbs, mark->id);
905 tag.tag_id = mlx5_flow_mark_set(mark->id);
906 flow_verbs_spec_add(dev_flow, &tag, size);
908 *action_flags |= MLX5_FLOW_ACTION_MARK;
912 * Convert the @p action into a Verbs specification. This function assumes that
913 * the input is valid and that there is space to insert the requested action
914 * into the flow. This function also return the action that was added.
917 * Pointer to the Ethernet device structure.
919 * Action configuration.
920 * @param[in, out] action_flags
921 * Pointer to the detected actions.
922 * @param[in] dev_flow
923 * Pointer to mlx5_flow.
925 * Pointer to error structure.
928 * 0 On success else a negative errno value is returned and rte_errno is set.
/* Translate a COUNT action: attach (get or create) a flow counter and, when
 * rdma-core supports counter sets, append the matching Verbs counter spec.
 * Returns 0 on success, negative errno (via rte_flow_error_set) otherwise. */
931 flow_verbs_translate_action_count(struct rte_eth_dev *dev,
932 const struct rte_flow_action *action,
933 uint64_t *action_flags,
934 struct mlx5_flow *dev_flow,
935 struct rte_flow_error *error)
937 const struct rte_flow_action_count *count = action->conf;
938 struct rte_flow *flow = dev_flow->flow;
939 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
940 unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
941 struct ibv_flow_spec_counter_action counter = {
942 .type = IBV_FLOW_SPEC_ACTION_COUNT,
/* Only allocate once; subsequent COUNT actions reuse flow->counter. */
947 if (!flow->counter) {
948 flow->counter = flow_verbs_counter_new(dev, count->shared,
951 return rte_flow_error_set(error, rte_errno,
952 RTE_FLOW_ERROR_TYPE_ACTION,
957 *action_flags |= MLX5_FLOW_ACTION_COUNT;
958 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
959 counter.counter_set_handle = flow->counter->cs->handle;
960 flow_verbs_spec_add(dev_flow, &counter, size);
966 * Internal validation function. For validating both actions and items.
969 * Pointer to the Ethernet device structure.
971 * Pointer to the flow attributes.
973 * Pointer to the list of items.
975 * Pointer to the list of actions.
977 * Pointer to the error structure.
980 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a whole rte_flow rule (attributes, item list, action list) against
 * what the Verbs flow engine supports.  Returns 0 on success, negative errno
 * with @error filled otherwise. */
983 flow_verbs_validate(struct rte_eth_dev *dev,
984 const struct rte_flow_attr *attr,
985 const struct rte_flow_item items[],
986 const struct rte_flow_action actions[],
987 struct rte_flow_error *error)
990 uint32_t action_flags = 0;
991 uint32_t item_flags = 0;
/* 0xff = "no L3 protocol constraint seen yet" sentinel. */
993 uint8_t next_protocol = 0xff;
997 ret = mlx5_flow_validate_attributes(dev, attr, error);
/* Pass 1: validate each pattern item and accumulate layer flags. */
1000 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1002 switch (items->type) {
1003 case RTE_FLOW_ITEM_TYPE_VOID:
1005 case RTE_FLOW_ITEM_TYPE_ETH:
1006 ret = mlx5_flow_validate_item_eth(items, item_flags,
1010 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1011 MLX5_FLOW_LAYER_OUTER_L2;
1013 case RTE_FLOW_ITEM_TYPE_VLAN:
1014 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1018 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1019 MLX5_FLOW_LAYER_OUTER_VLAN;
1021 case RTE_FLOW_ITEM_TYPE_IPV4:
1022 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1026 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1027 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Remember the masked L3 next-protocol for later GRE/MPLS checks. */
1028 if (items->mask != NULL &&
1029 ((const struct rte_flow_item_ipv4 *)
1030 items->mask)->hdr.next_proto_id)
1032 ((const struct rte_flow_item_ipv4 *)
1033 (items->spec))->hdr.next_proto_id;
1035 case RTE_FLOW_ITEM_TYPE_IPV6:
1036 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1040 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1041 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1042 if (items->mask != NULL &&
1043 ((const struct rte_flow_item_ipv6 *)
1044 items->mask)->hdr.proto)
1046 ((const struct rte_flow_item_ipv6 *)
1047 items->spec)->hdr.proto;
1049 case RTE_FLOW_ITEM_TYPE_UDP:
1050 ret = mlx5_flow_validate_item_udp(items, item_flags,
1055 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1056 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1058 case RTE_FLOW_ITEM_TYPE_TCP:
1059 ret = mlx5_flow_validate_item_tcp
1062 &rte_flow_item_tcp_mask,
1066 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1067 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1069 case RTE_FLOW_ITEM_TYPE_VXLAN:
1070 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1074 item_flags |= MLX5_FLOW_LAYER_VXLAN;
1076 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1077 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1082 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1084 case RTE_FLOW_ITEM_TYPE_GRE:
1085 ret = mlx5_flow_validate_item_gre(items, item_flags,
1086 next_protocol, error);
1089 item_flags |= MLX5_FLOW_LAYER_GRE;
1091 case RTE_FLOW_ITEM_TYPE_MPLS:
1092 ret = mlx5_flow_validate_item_mpls(items, item_flags,
/* MPLS must be consistent with any L3 protocol constraint seen. */
1097 if (next_protocol != 0xff &&
1098 next_protocol != IPPROTO_MPLS)
1099 return rte_flow_error_set
1101 RTE_FLOW_ERROR_TYPE_ITEM, items,
1102 "protocol filtering not compatible"
1103 " with MPLS layer");
1104 item_flags |= MLX5_FLOW_LAYER_MPLS;
1107 return rte_flow_error_set(error, ENOTSUP,
1108 RTE_FLOW_ERROR_TYPE_ITEM,
1109 NULL, "item not supported");
/* Pass 2: validate each action against the detected layers. */
1112 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1113 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1114 switch (actions->type) {
1115 case RTE_FLOW_ACTION_TYPE_VOID:
1117 case RTE_FLOW_ACTION_TYPE_FLAG:
1118 ret = mlx5_flow_validate_action_flag(action_flags,
1123 action_flags |= MLX5_FLOW_ACTION_FLAG;
1125 case RTE_FLOW_ACTION_TYPE_MARK:
1126 ret = mlx5_flow_validate_action_mark(actions,
1132 action_flags |= MLX5_FLOW_ACTION_MARK;
1134 case RTE_FLOW_ACTION_TYPE_DROP:
1135 ret = mlx5_flow_validate_action_drop(action_flags,
1140 action_flags |= MLX5_FLOW_ACTION_DROP;
1142 case RTE_FLOW_ACTION_TYPE_QUEUE:
1143 ret = mlx5_flow_validate_action_queue(actions,
1149 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1151 case RTE_FLOW_ACTION_TYPE_RSS:
1152 ret = mlx5_flow_validate_action_rss(actions,
1158 action_flags |= MLX5_FLOW_ACTION_RSS;
1160 case RTE_FLOW_ACTION_TYPE_COUNT:
1161 ret = mlx5_flow_validate_action_count(dev, attr, error);
1164 action_flags |= MLX5_FLOW_ACTION_COUNT;
1167 return rte_flow_error_set(error, ENOTSUP,
1168 RTE_FLOW_ERROR_TYPE_ACTION,
1170 "action not supported");
/* Every rule needs exactly one fate action (drop/queue/rss). */
1173 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1174 return rte_flow_error_set(error, EINVAL,
1175 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1176 "no fate action is found");
1181 * Calculate the required bytes that are needed for the action part of the verbs
1182 * flow, in addition returns bit-fields with all the detected actions, in order to
1183 * avoid another iteration over the actions.
1185 * @param[in] actions
1186 * Pointer to the list of actions.
1187 * @param[out] action_flags
1188 * Pointer to the detected actions.
1191 * The size of the memory needed for all actions.
/* Compute the number of bytes of Verbs spec storage the action list will
 * need and report the detected action bits through @action_flags, so the
 * caller can size the buffer without a second pass. */
1194 flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
1195 uint64_t *action_flags)
1198 uint64_t detected_actions = 0;
1200 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1201 switch (actions->type) {
1202 case RTE_FLOW_ACTION_TYPE_VOID:
1204 case RTE_FLOW_ACTION_TYPE_FLAG:
1205 size += sizeof(struct ibv_flow_spec_action_tag);
1206 detected_actions |= MLX5_FLOW_ACTION_FLAG;
1208 case RTE_FLOW_ACTION_TYPE_MARK:
1209 size += sizeof(struct ibv_flow_spec_action_tag);
1210 detected_actions |= MLX5_FLOW_ACTION_MARK;
1212 case RTE_FLOW_ACTION_TYPE_DROP:
1213 size += sizeof(struct ibv_flow_spec_action_drop);
1214 detected_actions |= MLX5_FLOW_ACTION_DROP;
/* QUEUE/RSS emit no Verbs spec, so they add no bytes. */
1216 case RTE_FLOW_ACTION_TYPE_QUEUE:
1217 detected_actions |= MLX5_FLOW_ACTION_QUEUE;
1219 case RTE_FLOW_ACTION_TYPE_RSS:
1220 detected_actions |= MLX5_FLOW_ACTION_RSS;
1222 case RTE_FLOW_ACTION_TYPE_COUNT:
1223 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
1224 size += sizeof(struct ibv_flow_spec_counter_action);
1226 detected_actions |= MLX5_FLOW_ACTION_COUNT;
1232 *action_flags = detected_actions;
1237 * Calculate the required bytes that are needed for the item part of the verbs
1238 * flow, in addition returns bit-fields with all the detected items, in order to
1239 * avoid another iteration over the items.
1241 * @param[in] items
1242 * Pointer to the list of items.
1243 * @param[in, out] item_flags
1244 * Pointer to the detected items.
1247 * The size of the memory needed for all items.
1250 flow_verbs_get_items_and_size(const struct rte_flow_item items[],
1251 uint64_t *item_flags)
/* NOTE(review): elided lines (number gaps): the "size" declaration,
 * per-case "break"s, closing braces and "return size;" are not visible in
 * this extract. */
1254 uint64_t detected_items = 0; /* MLX5_FLOW_LAYER_* bits found so far. */
/* Incoming *item_flags may already carry tunnel bits; when set, L2-L4
 * matches below are classified as inner-layer matches. */
1255 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
1257 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1258 switch (items->type) {
1259 case RTE_FLOW_ITEM_TYPE_VOID:
/* VOID matches nothing; no spec space, no flag. */
1261 case RTE_FLOW_ITEM_TYPE_ETH:
1262 size += sizeof(struct ibv_flow_spec_eth);
1263 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1264 MLX5_FLOW_LAYER_OUTER_L2;
/* VLAN reuses the Ethernet spec (VLAN fields live inside it). */
1266 case RTE_FLOW_ITEM_TYPE_VLAN:
1267 size += sizeof(struct ibv_flow_spec_eth);
1268 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1269 MLX5_FLOW_LAYER_OUTER_VLAN;
1271 case RTE_FLOW_ITEM_TYPE_IPV4:
1272 size += sizeof(struct ibv_flow_spec_ipv4_ext);
1273 detected_items |= tunnel ?
1274 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1275 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1277 case RTE_FLOW_ITEM_TYPE_IPV6:
1278 size += sizeof(struct ibv_flow_spec_ipv6);
1279 detected_items |= tunnel ?
1280 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1281 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* UDP and TCP share the same ibv tcp_udp spec layout. */
1283 case RTE_FLOW_ITEM_TYPE_UDP:
1284 size += sizeof(struct ibv_flow_spec_tcp_udp);
1285 detected_items |= tunnel ?
1286 MLX5_FLOW_LAYER_INNER_L4_UDP :
1287 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1289 case RTE_FLOW_ITEM_TYPE_TCP:
1290 size += sizeof(struct ibv_flow_spec_tcp_udp);
1291 detected_items |= tunnel ?
1292 MLX5_FLOW_LAYER_INNER_L4_TCP :
1293 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1295 case RTE_FLOW_ITEM_TYPE_VXLAN:
1296 size += sizeof(struct ibv_flow_spec_tunnel);
1297 detected_items |= MLX5_FLOW_LAYER_VXLAN;
1299 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1300 size += sizeof(struct ibv_flow_spec_tunnel);
1301 detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
/* With MPLS support, GRE gets a dedicated spec and MPLS is
 * accepted; presumably an elided #else (see the second GRE case
 * below, line 1313) falls back to a generic tunnel spec — confirm
 * the #else/#endif placement in the full file. */
1303 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1304 case RTE_FLOW_ITEM_TYPE_GRE:
1305 size += sizeof(struct ibv_flow_spec_gre);
1306 detected_items |= MLX5_FLOW_LAYER_GRE;
1308 case RTE_FLOW_ITEM_TYPE_MPLS:
1309 size += sizeof(struct ibv_flow_spec_mpls);
1310 detected_items |= MLX5_FLOW_LAYER_MPLS;
/* Fallback GRE case (other branch of the conditional above):
 * matched only as an anonymous tunnel. */
1313 case RTE_FLOW_ITEM_TYPE_GRE:
1314 size += sizeof(struct ibv_flow_spec_tunnel);
1315 detected_items |= MLX5_FLOW_LAYER_TUNNEL;
/* Report the detected item bits to the caller. */
1322 *item_flags = detected_items;
1327 * Internal preparation function. Allocate mlx5_flow with the required size.
1328 * The required size is calculated based on the actions and items. This function
1329 * also returns the detected actions and items for later use.
1332 * Pointer to the flow attributes.
1334 * Pointer to the list of items.
1335 * @param[in] actions
1336 * Pointer to the list of actions.
1337 * @param[out] item_flags
1338 * Pointer to bit mask of all items detected.
1339 * @param[out] action_flags
1340 * Pointer to bit mask of all actions detected.
1342 * Pointer to the error structure.
1345 * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1348 static struct mlx5_flow *
1349 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1350 const struct rte_flow_item items[],
1351 const struct rte_flow_action actions[],
1352 uint64_t *item_flags,
1353 uint64_t *action_flags,
1354 struct rte_flow_error *error)
/* Base size: the mlx5_flow handle plus the ibv_flow_attr header; the
 * per-item/per-action specs computed below are laid out after them in
 * the same single allocation. */
1356 uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1357 struct mlx5_flow *flow;
1359 size += flow_verbs_get_actions_and_size(actions, action_flags);
1360 size += flow_verbs_get_items_and_size(items, item_flags);
/* Zero-initialized so num_of_specs/verbs.size start at 0. */
1361 flow = rte_calloc(__func__, 1, size, 0);
/* NOTE(review): the "if (!flow)" guard and the NULL/return statements
 * around this error path are elided in this extract (number gaps). */
1363 rte_flow_error_set(error, ENOMEM,
1364 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1365 "not enough memory to create flow");
/* ibv_flow_attr sits immediately after the mlx5_flow struct... */
1368 flow->verbs.attr = (void *)(flow + 1);
/* ...and the spec area starts right after the attr header (the
 * assignment target of this expression is on an elided line). */
1370 (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1375 * Fill the flow with verb spec.
1378 * Pointer to Ethernet device.
1379 * @param[in, out] dev_flow
1380 * Pointer to the mlx5 flow.
1382 * Pointer to the flow attributes.
1384 * Pointer to the list of items.
1385 * @param[in] actions
1386 * Pointer to the list of actions.
1388 * Pointer to the error structure.
1391 * 0 on success, else a negative errno value otherwise and rte_errno is set.
1394 flow_verbs_translate(struct rte_eth_dev *dev,
1395 struct mlx5_flow *dev_flow,
1396 const struct rte_flow_attr *attr,
1397 const struct rte_flow_item items[],
1398 const struct rte_flow_action actions[],
1399 struct rte_flow_error *error)
/* NOTE(review): per-case "break"s, helper-call argument lists and several
 * surrounding statements are elided in this extract (number gaps). */
1401 uint64_t action_flags = 0;
1402 uint64_t item_flags = 0;
1403 uint64_t priority = attr->priority;
1404 struct priv *priv = dev->data->dev_private;
/* MLX5_FLOW_PRIO_RSVD means "unspecified": use the lowest configured
 * flow priority. */
1406 if (priority == MLX5_FLOW_PRIO_RSVD)
1407 priority = priv->config.flow_prio - 1;
/* First pass: translate every action into its Verbs spec. */
1408 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1410 switch (actions->type) {
1411 case RTE_FLOW_ACTION_TYPE_VOID:
1413 case RTE_FLOW_ACTION_TYPE_FLAG:
1414 flow_verbs_translate_action_flag(actions,
1418 case RTE_FLOW_ACTION_TYPE_MARK:
1419 flow_verbs_translate_action_mark(actions,
1423 case RTE_FLOW_ACTION_TYPE_DROP:
1424 flow_verbs_translate_action_drop(&action_flags,
1427 case RTE_FLOW_ACTION_TYPE_QUEUE:
1428 flow_verbs_translate_action_queue(actions,
1432 case RTE_FLOW_ACTION_TYPE_RSS:
1433 flow_verbs_translate_action_rss(actions,
/* COUNT is the only action translation that can fail here (its
 * return value is checked on elided lines). */
1437 case RTE_FLOW_ACTION_TYPE_COUNT:
1438 ret = flow_verbs_translate_action_count(dev,
1447 return rte_flow_error_set(error, ENOTSUP,
1448 RTE_FLOW_ERROR_TYPE_ACTION,
1450 "action not supported");
/* Publish the accumulated action bits on the rte_flow handle. */
1453 dev_flow->flow->actions |= action_flags;
/* Second pass: translate every item into its Verbs spec. */
1454 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1455 switch (items->type) {
1456 case RTE_FLOW_ITEM_TYPE_VOID:
1458 case RTE_FLOW_ITEM_TYPE_ETH:
1459 flow_verbs_translate_item_eth(items, &item_flags,
1462 case RTE_FLOW_ITEM_TYPE_VLAN:
1463 flow_verbs_translate_item_vlan(items, &item_flags,
1466 case RTE_FLOW_ITEM_TYPE_IPV4:
1467 flow_verbs_translate_item_ipv4(items, &item_flags,
1470 case RTE_FLOW_ITEM_TYPE_IPV6:
1471 flow_verbs_translate_item_ipv6(items, &item_flags,
1474 case RTE_FLOW_ITEM_TYPE_UDP:
1475 flow_verbs_translate_item_udp(items, &item_flags,
1478 case RTE_FLOW_ITEM_TYPE_TCP:
1479 flow_verbs_translate_item_tcp(items, &item_flags,
1482 case RTE_FLOW_ITEM_TYPE_VXLAN:
1483 flow_verbs_translate_item_vxlan(items, &item_flags,
1486 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1487 flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
1490 case RTE_FLOW_ITEM_TYPE_GRE:
1491 flow_verbs_translate_item_gre(items, &item_flags,
1494 case RTE_FLOW_ITEM_TYPE_MPLS:
1495 flow_verbs_translate_item_mpls(items, &item_flags,
1499 return rte_flow_error_set(error, ENOTSUP,
1500 RTE_FLOW_ERROR_TYPE_ITEM,
1502 "item not supported");
/* Map the requested priority to the device's priority space. */
1505 dev_flow->verbs.attr->priority =
1506 mlx5_flow_adjust_priority(dev, priority,
1507 dev_flow->verbs.attr->priority);
1512 * Remove the flow from the NIC but keeps it in memory.
1515 * Pointer to the Ethernet device structure.
1516 * @param[in, out] flow
1517 * Pointer to flow structure.
1520 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
/* NOTE(review): the NULL guards around verbs->flow/verbs->hrxq and the
 * statements clearing them after release are elided in this extract
 * (number gaps). */
1522 struct mlx5_flow_verbs *verbs;
1523 struct mlx5_flow *dev_flow;
/* Tear down every device sub-flow: destroy its ibv flow rule, then
 * release the hash RX queue it references. */
1527 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1528 verbs = &dev_flow->verbs;
1530 claim_zero(mlx5_glue->destroy_flow(verbs->flow));
/* Drop flows hold the shared drop hrxq; others hold a regular one. */
1534 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1535 mlx5_hrxq_drop_release(dev);
1537 mlx5_hrxq_release(dev, verbs->hrxq);
/* Release the (possibly shared) flow counter; mirrors
 * flow_verbs_counter_release() defined earlier in this file. */
1541 if (flow->counter) {
1542 flow_verbs_counter_release(flow->counter);
1543 flow->counter = NULL;
1548 * Remove the flow from the NIC and the memory.
1551 * Pointer to the Ethernet device structure.
1552 * @param[in, out] flow
1553 * Pointer to flow structure.
1556 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1558 struct mlx5_flow *dev_flow;
/* Detach all hardware/queue resources first... */
1562 flow_verbs_remove(dev, flow);
/* ...then free every device sub-flow (the rte_free() of each dev_flow
 * is on an elided line inside this loop). */
1563 while (!LIST_EMPTY(&flow->dev_flows)) {
1564 dev_flow = LIST_FIRST(&flow->dev_flows);
1565 LIST_REMOVE(dev_flow, next);
1571 * Apply the flow to the NIC.
1574 * Pointer to the Ethernet device structure.
1575 * @param[in, out] flow
1576 * Pointer to flow structure.
1578 * Pointer to error structure.
1581 * 0 on success, a negative errno value otherwise and rte_errno is set.
1584 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1585 struct rte_flow_error *error)
/* NOTE(review): several guards ("if (!hrxq)", "goto error", the success
 * "return 0") are elided in this extract (number gaps); the visible tail
 * from line 1639 on is the error-unwind path. */
1587 struct mlx5_flow_verbs *verbs;
1588 struct mlx5_flow *dev_flow;
/* For each sub-flow: acquire a hash RX queue, then create the Verbs
 * flow rule on its QP. */
1591 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1592 verbs = &dev_flow->verbs;
1593 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Drop flows target the shared drop hash RX queue. */
1594 verbs->hrxq = mlx5_hrxq_drop_new(dev);
1598 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1599 "cannot get drop hash queue");
/* Non-drop path: reuse an existing hrxq matching the RSS key and
 * queue set, or create a new one. */
1603 struct mlx5_hrxq *hrxq;
1605 hrxq = mlx5_hrxq_get(dev, flow->key,
1606 MLX5_RSS_HASH_KEY_LEN,
1609 flow->rss.queue_num);
1611 hrxq = mlx5_hrxq_new(dev, flow->key,
1612 MLX5_RSS_HASH_KEY_LEN,
1615 flow->rss.queue_num,
1617 MLX5_FLOW_LAYER_TUNNEL));
1621 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1622 "cannot get hash queue");
/* Install the rule in hardware on the hrxq's QP. */
1627 verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1630 rte_flow_error_set(error, errno,
1631 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1633 "hardware refuses to create flow");
/* Error unwind: release everything acquired so far, preserving the
 * original rte_errno across the cleanup calls. */
1639 err = rte_errno; /* Save rte_errno before cleanup. */
1640 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1641 verbs = &dev_flow->verbs;
1643 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1644 mlx5_hrxq_drop_release(dev);
1646 mlx5_hrxq_release(dev, verbs->hrxq);
1650 rte_errno = err; /* Restore rte_errno. */
1654 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1655 .validate = flow_verbs_validate,
1656 .prepare = flow_verbs_prepare,
1657 .translate = flow_verbs_translate,
1658 .apply = flow_verbs_apply,
1659 .remove = flow_verbs_remove,
1660 .destroy = flow_verbs_destroy,