1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
5 #include <netinet/in.h>
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
/*
 * NOTE(review): this listing is sampled from a larger file; the stale
 * numbers at the start of each line are original line numbers, and gaps
 * between them mean statements (branches, returns, closing braces) are
 * elided.  Comments document only what the visible lines establish.
 */
/**
 * Get a flow counter: reuse a matching shared counter from the per-port
 * list when possible, otherwise allocate and register a new one.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier — presumably compared against cnt->id in the elided
 *   part of the lookup loop; confirm against the full source.
 *
 * @return
 *   A pointer to the counter, NULL otherwise and rte_errno is set.
 */
49 static struct mlx5_flow_counter *
50 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
52 struct priv *priv = dev->data->dev_private;
53 struct mlx5_flow_counter *cnt;
/* Scan existing counters for a reusable shared entry. */
55 LIST_FOREACH(cnt, &priv->flow_counters, next) {
56 if (!cnt->shared || cnt->shared != shared)
/* (elided) skip non-matching entries; on a match the counter's refcount
 * is presumably bumped and the counter returned — confirm in full source. */
63 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
/* Build a template including a freshly created Verbs counter set. */
65 struct mlx5_flow_counter tmpl = {
68 .cs = mlx5_glue->create_counter_set
70 &(struct ibv_counter_set_init_attr){
/* Allocate the tracking structure and link it into the device list. */
81 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
87 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
/**
 * Release a flow counter: drop one reference and, when the last reference
 * is gone, destroy the underlying Verbs counter set and unlink the counter
 * from its list (the freeing of the structure itself is elided here).
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
101 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
103 if (--counter->ref_cnt == 0) {
104 claim_zero(mlx5_glue->destroy_counter_set(counter->cs))
105 LIST_REMOVE(counter, next);
/**
 * Add a verbs item specification into @p flow.
 *
 * Appends @p size bytes of specification at the current end of the flow's
 * specs buffer and bumps the Verbs attribute's spec counter.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
121 flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
123 struct mlx5_flow_verbs *verbs = &flow->verbs;
/* Write position = base of specs buffer + bytes already used. */
128 dst = (void *)(verbs->specs + verbs->size)
129 memcpy(dst, src, size);
130 ++verbs->attr->num_of_specs;
/* (elided) verbs->size is presumably advanced by @p size afterwards —
 * confirm against the full source. */
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit field with all detected items.
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 */
148 flow_verbs_translate_item_eth(const struct rte_flow_item *item,
149 uint64_t *item_flags,
150 struct mlx5_flow *dev_flow)
152 const struct rte_flow_item_eth *spec = item->spec;
153 const struct rte_flow_item_eth *mask = item->mask;
/* Inner (tunneled) headers need IBV_FLOW_SPEC_INNER in the spec type. */
154 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
155 const unsigned int size = sizeof(struct ibv_flow_spec_eth);
156 struct ibv_flow_spec_eth eth = {
157 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default Ethernet mask when the item has none. */
162 mask = &rte_flow_item_eth_mask;
166 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
167 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
168 eth.val.ether_type = spec->type;
169 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
170 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
171 eth.mask.ether_type = mask->type;
172 /* Remove unwanted bits from values. */
173 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
174 eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
175 eth.val.src_mac[i] &= eth.mask.src_mac[i];
177 eth.val.ether_type &= eth.mask.ether_type;
178 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
180 flow_verbs_spec_add(dev_flow, &eth, size);
/* Record the L2 layer as inner or outer depending on tunnel context. */
181 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
182 MLX5_FLOW_LAYER_OUTER_L2;
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
196 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
197 struct ibv_flow_spec_eth *eth)
200 const enum ibv_flow_spec_type search = eth->type;
/* Specs are laid out back-to-back immediately after the attr header. */
201 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
202 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
/* Walk the spec list looking for the matching Ethernet spec. */
204 for (i = 0; i != attr->num_of_specs; ++i) {
205 if (hdr->type == search) {
206 struct ibv_flow_spec_eth *e =
207 (struct ibv_flow_spec_eth *)hdr;
/* Overwrite VLAN tag and ether type in the existing eth spec. */
209 e->val.vlan_tag = eth->val.vlan_tag;
210 e->mask.vlan_tag = eth->mask.vlan_tag;
211 e->val.ether_type = eth->val.ether_type;
212 e->mask.ether_type = eth->mask.ether_type;
/* Advance by the spec's self-declared size to the next header. */
215 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * A VLAN item either creates a fresh Ethernet spec (no L2 seen yet) or
 * merges its tag into the already-emitted Ethernet spec.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that holds all detected items.
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 */
232 flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
233 uint64_t *item_flags,
234 struct mlx5_flow *dev_flow)
236 const struct rte_flow_item_vlan *spec = item->spec;
237 const struct rte_flow_item_vlan *mask = item->mask;
238 unsigned int size = sizeof(struct ibv_flow_spec_eth);
239 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
240 struct ibv_flow_spec_eth eth = {
241 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Which L2 flag to test: inner or outer depending on tunnel context. */
244 const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
245 MLX5_FLOW_LAYER_OUTER_L2;
248 mask = &rte_flow_item_vlan_mask;
250 eth.val.vlan_tag = spec->tci;
251 eth.mask.vlan_tag = mask->tci;
252 eth.val.vlan_tag &= eth.mask.vlan_tag;
253 eth.val.ether_type = spec->inner_type;
254 eth.mask.ether_type = mask->inner_type;
255 eth.val.ether_type &= eth.mask.ether_type;
/* No Ethernet spec emitted yet: add one carrying the VLAN fields. */
257 if (!(*item_flags & l2m)) {
258 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
259 flow_verbs_spec_add(dev_flow, &eth, size);
/* Otherwise patch the VLAN tag into the existing Ethernet spec. */
261 flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
262 size = 0; /* Only an update is done in eth specification. */
264 *item_flags |= tunnel ?
265 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
266 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
282 flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
283 uint64_t *item_flags,
284 struct mlx5_flow *dev_flow)
286 const struct rte_flow_item_ipv4 *spec = item->spec;
287 const struct rte_flow_item_ipv4 *mask = item->mask;
288 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
289 unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
290 struct ibv_flow_spec_ipv4_ext ipv4 = {
291 .type = IBV_FLOW_SPEC_IPV4_EXT |
292 (tunnel ? IBV_FLOW_SPEC_INNER : 0),
297 mask = &rte_flow_item_ipv4_mask;
298 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
299 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Addresses stay big-endian; Verbs expects network byte order here. */
301 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
302 .src_ip = spec->hdr.src_addr,
303 .dst_ip = spec->hdr.dst_addr,
304 .proto = spec->hdr.next_proto_id,
305 .tos = spec->hdr.type_of_service,
307 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
308 .src_ip = mask->hdr.src_addr,
309 .dst_ip = mask->hdr.dst_addr,
310 .proto = mask->hdr.next_proto_id,
311 .tos = mask->hdr.type_of_service,
313 /* Remove unwanted bits from values. */
314 ipv4.val.src_ip &= ipv4.mask.src_ip;
315 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
316 ipv4.val.proto &= ipv4.mask.proto;
317 ipv4.val.tos &= ipv4.mask.tos;
/* Fold the IPv4 layer into the RSS hash-field selection. */
319 dev_flow->verbs.hash_fields |=
320 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
321 MLX5_IPV4_LAYER_TYPES,
322 MLX5_IPV4_IBV_RX_HASH);
323 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
324 flow_verbs_spec_add(dev_flow, &ipv4, size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
340 flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
341 uint64_t *item_flags,
342 struct mlx5_flow *dev_flow)
344 const struct rte_flow_item_ipv6 *spec = item->spec;
345 const struct rte_flow_item_ipv6 *mask = item->mask;
/* NOTE(review): tunnel is derived from dev_flow->flow->layers here, while
 * the eth/vlan/ipv4/udp translators use *item_flags — verify against the
 * full source whether these can disagree (sibling inconsistency). */
346 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
347 unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
348 struct ibv_flow_spec_ipv6 ipv6 = {
349 .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
354 mask = &rte_flow_item_ipv6_mask;
355 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
356 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
359 uint32_t vtc_flow_val;
360 uint32_t vtc_flow_mask;
362 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
363 RTE_DIM(ipv6.val.src_ip));
364 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
365 RTE_DIM(ipv6.val.dst_ip));
366 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
367 RTE_DIM(ipv6.mask.src_ip));
368 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
369 RTE_DIM(ipv6.mask.dst_ip));
/* vtc_flow packs version/traffic-class/flow-label; split it in CPU
 * byte order, then convert the flow label back to big-endian. */
370 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
371 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
372 ipv6.val.flow_label =
373 rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
375 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
377 ipv6.val.next_hdr = spec->hdr.proto;
378 ipv6.val.hop_limit = spec->hdr.hop_limits;
379 ipv6.mask.flow_label =
380 rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
382 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
384 ipv6.mask.next_hdr = mask->hdr.proto;
385 ipv6.mask.hop_limit = mask->hdr.hop_limits;
386 /* Remove unwanted bits from values. */
387 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
388 ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
389 ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
391 ipv6.val.flow_label &= ipv6.mask.flow_label;
392 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
393 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
394 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
396 dev_flow->verbs.hash_fields |=
397 mlx5_flow_hashfields_adjust(dev_flow, tunnel,
398 MLX5_IPV6_LAYER_TYPES,
399 MLX5_IPV6_IBV_RX_HASH);
400 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
401 flow_verbs_spec_add(dev_flow, &ipv6, size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
417 flow_verbs_translate_item_udp(const struct rte_flow_item *item,
418 uint64_t *item_flags,
419 struct mlx5_flow *dev_flow)
421 const struct rte_flow_item_udp *spec = item->spec;
422 const struct rte_flow_item_udp *mask = item->mask;
423 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
424 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
425 struct ibv_flow_spec_tcp_udp udp = {
426 .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
431 mask = &rte_flow_item_udp_mask;
432 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
433 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* Ports stay big-endian as Verbs expects network byte order. */
435 udp.val.dst_port = spec->hdr.dst_port;
436 udp.val.src_port = spec->hdr.src_port;
437 udp.mask.dst_port = mask->hdr.dst_port;
438 udp.mask.src_port = mask->hdr.src_port;
439 /* Remove unwanted bits from values. */
440 udp.val.src_port &= udp.mask.src_port;
441 udp.val.dst_port &= udp.mask.dst_port;
443 dev_flow->verbs.hash_fields |=
444 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_UDP,
445 (IBV_RX_HASH_SRC_PORT_UDP |
446 IBV_RX_HASH_DST_PORT_UDP));
447 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
448 flow_verbs_spec_add(dev_flow, &udp, size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
464 flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
465 uint64_t *item_flags,
466 struct mlx5_flow *dev_flow)
468 const struct rte_flow_item_tcp *spec = item->spec;
469 const struct rte_flow_item_tcp *mask = item->mask;
/* NOTE(review): tunnel is read from dev_flow->flow->layers, unlike the
 * UDP translator which uses *item_flags — verify consistency against the
 * full source. */
470 const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
471 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
472 struct ibv_flow_spec_tcp_udp tcp = {
473 .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
478 mask = &rte_flow_item_tcp_mask;
479 *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
480 MLX5_FLOW_LAYER_OUTER_L4_TCP;
482 tcp.val.dst_port = spec->hdr.dst_port;
483 tcp.val.src_port = spec->hdr.src_port;
484 tcp.mask.dst_port = mask->hdr.dst_port;
485 tcp.mask.src_port = mask->hdr.src_port;
486 /* Remove unwanted bits from values. */
487 tcp.val.src_port &= tcp.mask.src_port;
488 tcp.val.dst_port &= tcp.mask.dst_port;
490 dev_flow->verbs.hash_fields |=
491 mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_TCP,
492 (IBV_RX_HASH_SRC_PORT_TCP |
493 IBV_RX_HASH_DST_PORT_TCP));
494 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
495 flow_verbs_spec_add(dev_flow, &tcp, size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
511 flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
512 uint64_t *item_flags,
513 struct mlx5_flow *dev_flow)
515 const struct rte_flow_item_vxlan *spec = item->spec;
516 const struct rte_flow_item_vxlan *mask = item->mask;
517 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
518 struct ibv_flow_spec_tunnel vxlan = {
519 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* NOTE(review): the union field is named vlan_id but actually carries the
 * VXLAN tunnel id: the 24-bit VNI is copied into vni[1..3], aliasing the
 * upper bytes of the 32-bit integer. */
525 } id = { .vlan_id = 0, };
528 mask = &rte_flow_item_vxlan_mask;
530 memcpy(&id.vni[1], spec->vni, 3);
531 vxlan.val.tunnel_id = id.vlan_id;
532 memcpy(&id.vni[1], mask->vni, 3);
533 vxlan.mask.tunnel_id = id.vlan_id;
534 /* Remove unwanted bits from values. */
535 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
537 flow_verbs_spec_add(dev_flow, &vxlan, size);
538 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
539 *item_flags |= MLX5_FLOW_LAYER_VXLAN;
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
555 flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
556 uint64_t *item_flags,
557 struct mlx5_flow *dev_flow)
559 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
560 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
561 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
/* VXLAN-GPE is expressed with the generic VXLAN tunnel spec type. */
562 struct ibv_flow_spec_tunnel vxlan_gpe = {
563 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Same vni/vlan_id aliasing trick as the plain VXLAN translator:
 * 24-bit VNI written into bytes 1..3 of a 32-bit integer. */
569 } id = { .vlan_id = 0, };
572 mask = &rte_flow_item_vxlan_gpe_mask;
574 memcpy(&id.vni[1], spec->vni, 3);
575 vxlan_gpe.val.tunnel_id = id.vlan_id;
576 memcpy(&id.vni[1], mask->vni, 3);
577 vxlan_gpe.mask.tunnel_id = id.vlan_id;
578 /* Remove unwanted bits from values. */
579 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
581 flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
582 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
583 *item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * Walks the spec list after the attribute header; when the searched spec
 * type is found and its protocol/next-header is still unset, fills it in
 * with @p protocol and an all-ones mask.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
597 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
598 enum ibv_flow_spec_type search,
602 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
603 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
607 for (i = 0; i != attr->num_of_specs; ++i) {
608 if (hdr->type == search) {
/* Union of the two spec layouts the header may alias. */
610 struct ibv_flow_spec_ipv4_ext *ipv4;
611 struct ibv_flow_spec_ipv6 *ipv6;
615 case IBV_FLOW_SPEC_IPV4_EXT:
616 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
/* Only fill in the protocol when the user left it unmatched. */
617 if (!ip.ipv4->val.proto) {
618 ip.ipv4->val.proto = protocol;
619 ip.ipv4->mask.proto = 0xff;
622 case IBV_FLOW_SPEC_IPV6:
623 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
624 if (!ip.ipv6->val.next_hdr) {
625 ip.ipv6->val.next_hdr = protocol;
626 ip.ipv6->mask.next_hdr = 0xff;
634 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * Without MPLS support GRE is matched as a generic tunnel spec; with it,
 * the dedicated GRE spec is used and key/protocol fields are matched.
 *
 * @param[in] item
 *   Item specification.  NOTE(review): marked __rte_unused but dereferenced
 *   in the HAVE_IBV_DEVICE_MPLS_SUPPORT branch — the attribute merely
 *   silences the warning in the other configuration.
 * @param[in, out] item_flags
 *   Bit mask that marks all detected items.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
651 flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
652 uint64_t *item_flags,
653 struct mlx5_flow *dev_flow)
655 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
656 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
657 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
658 struct ibv_flow_spec_tunnel tunnel = {
659 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
663 const struct rte_flow_item_gre *spec = item->spec;
664 const struct rte_flow_item_gre *mask = item->mask;
665 unsigned int size = sizeof(struct ibv_flow_spec_gre);
666 struct ibv_flow_spec_gre tunnel = {
667 .type = IBV_FLOW_SPEC_GRE,
672 mask = &rte_flow_item_gre_mask;
674 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
675 tunnel.val.protocol = spec->protocol;
676 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
677 tunnel.mask.protocol = mask->protocol;
678 /* Remove unwanted bits from values. */
679 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
680 tunnel.val.protocol &= tunnel.mask.protocol;
681 tunnel.val.key &= tunnel.mask.key;
/* Backfill IPPROTO_GRE into the outer IPv4/IPv6 spec if the user did not
 * constrain the IP protocol themselves. */
684 if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
685 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
686 IBV_FLOW_SPEC_IPV4_EXT,
689 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
692 flow_verbs_spec_add(dev_flow, &tunnel, size);
693 verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
694 *item_flags |= MLX5_FLOW_LAYER_GRE;
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.  No-op unless HAVE_IBV_DEVICE_MPLS_SUPPORT is defined.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] action_flags
 *   NOTE(review): misnamed — this receives the *item* flags bit mask
 *   (MLX5_FLOW_LAYER_MPLS is OR-ed into it), not action flags; sibling
 *   translators call the same parameter item_flags.
 * @param[in, out] dev_flow
 *   Pointer to specific flow structure.
 */
710 flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
711 uint64_t *action_flags __rte_unused,
712 struct mlx5_flow *dev_flow __rte_unused)
714 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
715 const struct rte_flow_item_mpls *spec = item->spec;
716 const struct rte_flow_item_mpls *mask = item->mask;
717 unsigned int size = sizeof(struct ibv_flow_spec_mpls);
718 struct ibv_flow_spec_mpls mpls = {
719 .type = IBV_FLOW_SPEC_MPLS,
724 mask = &rte_flow_item_mpls_mask;
/* The whole 32-bit MPLS label stack entry is copied as the label field. */
726 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
727 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
728 /* Remove unwanted bits from values. */
729 mpls.val.label &= mpls.mask.label;
731 flow_verbs_spec_add(dev_flow, &mpls, size);
732 dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
733 *action_flags |= MLX5_FLOW_LAYER_MPLS;
/**
 * Convert the drop action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 */
748 flow_verbs_translate_action_drop(uint64_t *action_flags,
749 struct mlx5_flow *dev_flow)
751 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
752 struct ibv_flow_spec_action_drop drop = {
753 .type = IBV_FLOW_SPEC_ACTION_DROP,
757 flow_verbs_spec_add(dev_flow, &drop, size);
758 *action_flags |= MLX5_FLOW_ACTION_DROP;
/**
 * Convert the queue action into a Verbs specification. This function
 * assumes that the input is valid; no Verbs spec is emitted — the target
 * queue is recorded on the flow for hrxq setup.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 */
774 flow_verbs_translate_action_queue(const struct rte_flow_action *action,
775 uint64_t *action_flags,
776 struct mlx5_flow *dev_flow)
778 const struct rte_flow_action_queue *queue = action->conf;
779 struct rte_flow *flow = dev_flow->flow;
/* A single-queue "RSS" with just the requested queue index. */
782 (*flow->queue)[0] = queue->index;
783 flow->rss.queue_num = 1;
784 *action_flags |= MLX5_FLOW_ACTION_QUEUE;
/**
 * Convert the RSS action into a Verbs specification. This function assumes
 * that the input is valid; the RSS configuration (queues, key, types,
 * level) is copied onto the flow for later hrxq creation.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 */
800 flow_verbs_translate_action_rss(const struct rte_flow_action *action,
801 uint64_t *action_flags,
802 struct mlx5_flow *dev_flow)
804 const struct rte_flow_action_rss *rss = action->conf;
805 struct rte_flow *flow = dev_flow->flow;
808 memcpy((*flow->queue), rss->queue,
809 rss->queue_num * sizeof(uint16_t));
810 flow->rss.queue_num = rss->queue_num;
/* Key length is fixed at MLX5_RSS_HASH_KEY_LEN — presumably validated
 * earlier; confirm against the elided validation path. */
811 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
812 flow->rss.types = rss->types;
813 flow->rss.level = rss->level;
814 *action_flags |= MLX5_FLOW_ACTION_RSS;
/**
 * Convert the flag action into a Verbs specification. This function
 * assumes that the input is valid and that there is space to insert the
 * requested action into the flow.  Emits a tag spec carrying the default
 * mark value.
 *
 * @param[in] action
 *   Action configuration (unused — the flag carries no configuration).
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 */
830 flow_verbs_translate_action_flag
831 (const struct rte_flow_action *action __rte_unused,
832 uint64_t *action_flags,
833 struct mlx5_flow *dev_flow)
835 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
836 struct ibv_flow_spec_action_tag tag = {
837 .type = IBV_FLOW_SPEC_ACTION_TAG,
839 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
/* NOTE(review): FLAG is recorded as MLX5_FLOW_ACTION_MARK here — both are
 * implemented via the same tag spec; confirm this aliasing is intended. */
841 *action_flags |= MLX5_FLOW_ACTION_MARK;
842 flow_verbs_spec_add(dev_flow, &tag, size);
/**
 * Update verbs specification to modify the flag to mark.
 *
 * Scans the already-emitted specs for a tag spec and rewrites its tag id
 * with the encoded mark value.
 *
 * @param[in, out] verbs
 *   Pointer to the mlx5_flow_verbs structure.
 * @param[in] mark_id
 *   Mark identifier to replace the flag.
 */
854 flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
856 struct ibv_spec_header *hdr;
861 /* Update Verbs specification. */
862 hdr = (struct ibv_spec_header *)verbs->specs;
865 for (i = 0; i != verbs->attr->num_of_specs; ++i) {
866 if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
867 struct ibv_flow_spec_action_tag *t =
868 (struct ibv_flow_spec_action_tag *)hdr;
870 t->tag_id = mlx5_flow_mark_set(mark_id);
872 hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
/**
 * Convert the mark action into a Verbs specification. This function
 * assumes that the input is valid and that there is space to insert the
 * requested action into the flow.  If a flag action was already emitted,
 * its tag spec is rewritten in place instead of adding a second one.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 */
889 flow_verbs_translate_action_mark(const struct rte_flow_action *action,
890 uint64_t *action_flags,
891 struct mlx5_flow *dev_flow)
893 const struct rte_flow_action_mark *mark = action->conf;
894 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
895 struct ibv_flow_spec_action_tag tag = {
896 .type = IBV_FLOW_SPEC_ACTION_TAG,
899 struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
/* FLAG already produced a tag spec: patch it rather than append. */
901 if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
902 flow_verbs_mark_update(verbs, mark->id);
905 tag.tag_id = mlx5_flow_mark_set(mark->id);
906 flow_verbs_spec_add(dev_flow, &tag, size);
908 *action_flags |= MLX5_FLOW_ACTION_MARK;
/**
 * Convert the count action into a Verbs specification. This function
 * assumes that the input is valid and that there is space to insert the
 * requested action into the flow.  Acquires (or reuses) a flow counter
 * and, when counter sets are supported, emits a counter spec.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] action_flags
 *   Pointer to the detected actions.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 On success else a negative errno value is returned and rte_errno is set.
 */
931 flow_verbs_translate_action_count(struct rte_eth_dev *dev,
932 const struct rte_flow_action *action,
933 uint64_t *action_flags,
934 struct mlx5_flow *dev_flow,
935 struct rte_flow_error *error)
937 const struct rte_flow_action_count *count = action->conf;
938 struct rte_flow *flow = dev_flow->flow;
939 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
940 unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
941 struct ibv_flow_spec_counter_action counter = {
942 .type = IBV_FLOW_SPEC_ACTION_COUNT,
/* Lazily attach a counter to the flow; error out when acquisition
 * fails (error path partially elided in this listing). */
947 if (!flow->counter) {
948 flow->counter = flow_verbs_counter_new(dev, count->shared,
951 return rte_flow_error_set(error, rte_errno,
952 RTE_FLOW_ERROR_TYPE_ACTION,
957 *action_flags |= MLX5_FLOW_ACTION_COUNT;
958 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
959 counter.counter_set_handle = flow->counter->cs->handle;
960 flow_verbs_spec_add(dev_flow, &counter, size);
/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
983 flow_verbs_validate(struct rte_eth_dev *dev,
984 const struct rte_flow_attr *attr,
985 const struct rte_flow_item items[],
986 const struct rte_flow_action actions[],
987 struct rte_flow_error *error)
/* NOTE(review): flags accumulated as uint32_t here while the translate
 * helpers take uint64_t* — verify no MLX5_FLOW_* bit above 31 is needed. */
990 uint32_t action_flags = 0;
991 uint32_t item_flags = 0;
/* 0xff = "any" until an L3 item pins the next protocol. */
993 uint8_t next_protocol = 0xff;
997 ret = mlx5_flow_validate_attributes(dev, attr, error);
/* First pass: validate each item and accumulate layer flags. */
1000 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1002 switch (items->type) {
1003 case RTE_FLOW_ITEM_TYPE_VOID:
1005 case RTE_FLOW_ITEM_TYPE_ETH:
1006 ret = mlx5_flow_validate_item_eth(items, item_flags,
1010 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1011 MLX5_FLOW_LAYER_OUTER_L2;
1013 case RTE_FLOW_ITEM_TYPE_VLAN:
1014 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1018 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1019 MLX5_FLOW_LAYER_OUTER_VLAN;
1021 case RTE_FLOW_ITEM_TYPE_IPV4:
1022 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1026 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1027 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Capture the L4 protocol the IPv4 header pins down, if masked. */
1028 if (items->mask != NULL &&
1029 ((const struct rte_flow_item_ipv4 *)
1030 items->mask)->hdr.next_proto_id)
1032 ((const struct rte_flow_item_ipv4 *)
1033 (items->spec))->hdr.next_proto_id;
1035 case RTE_FLOW_ITEM_TYPE_IPV6:
1036 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1040 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1041 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1042 if (items->mask != NULL &&
1043 ((const struct rte_flow_item_ipv6 *)
1044 items->mask)->hdr.proto)
1046 ((const struct rte_flow_item_ipv6 *)
1047 items->spec)->hdr.proto;
1049 case RTE_FLOW_ITEM_TYPE_UDP:
1050 ret = mlx5_flow_validate_item_udp(items, item_flags,
1055 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1056 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1058 case RTE_FLOW_ITEM_TYPE_TCP:
1059 ret = mlx5_flow_validate_item_tcp(items, item_flags,
1060 next_protocol, error);
1063 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1064 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1066 case RTE_FLOW_ITEM_TYPE_VXLAN:
1067 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1071 item_flags |= MLX5_FLOW_LAYER_VXLAN;
1073 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1074 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1079 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1081 case RTE_FLOW_ITEM_TYPE_GRE:
1082 ret = mlx5_flow_validate_item_gre(items, item_flags,
1083 next_protocol, error);
1086 item_flags |= MLX5_FLOW_LAYER_GRE;
1088 case RTE_FLOW_ITEM_TYPE_MPLS:
1089 ret = mlx5_flow_validate_item_mpls(items, item_flags,
/* MPLS must be consistent with any previously pinned IP protocol. */
1094 if (next_protocol != 0xff &&
1095 next_protocol != IPPROTO_MPLS)
1096 return rte_flow_error_set
1098 RTE_FLOW_ERROR_TYPE_ITEM, items,
1099 "protocol filtering not compatible"
1100 " with MPLS layer");
1101 item_flags |= MLX5_FLOW_LAYER_MPLS;
1104 return rte_flow_error_set(error, ENOTSUP,
1105 RTE_FLOW_ERROR_TYPE_ITEM,
1106 NULL, "item not supported");
/* Second pass: validate each action and accumulate action flags. */
1109 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1110 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1111 switch (actions->type) {
1112 case RTE_FLOW_ACTION_TYPE_VOID:
1114 case RTE_FLOW_ACTION_TYPE_FLAG:
1115 ret = mlx5_flow_validate_action_flag(action_flags,
1120 action_flags |= MLX5_FLOW_ACTION_FLAG;
1122 case RTE_FLOW_ACTION_TYPE_MARK:
1123 ret = mlx5_flow_validate_action_mark(actions,
1129 action_flags |= MLX5_FLOW_ACTION_MARK;
1131 case RTE_FLOW_ACTION_TYPE_DROP:
1132 ret = mlx5_flow_validate_action_drop(action_flags,
1137 action_flags |= MLX5_FLOW_ACTION_DROP;
1139 case RTE_FLOW_ACTION_TYPE_QUEUE:
1140 ret = mlx5_flow_validate_action_queue(actions,
1146 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1148 case RTE_FLOW_ACTION_TYPE_RSS:
1149 ret = mlx5_flow_validate_action_rss(actions,
1155 action_flags |= MLX5_FLOW_ACTION_RSS;
1157 case RTE_FLOW_ACTION_TYPE_COUNT:
1158 ret = mlx5_flow_validate_action_count(dev, attr, error);
1161 action_flags |= MLX5_FLOW_ACTION_COUNT;
1164 return rte_flow_error_set(error, ENOTSUP,
1165 RTE_FLOW_ERROR_TYPE_ACTION,
1167 "action not supported");
/* Exactly one fate action (drop/queue/rss) must be present. */
1170 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1171 return rte_flow_error_set(error, EINVAL,
1172 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1173 "no fate action is found");
/**
 * Calculate the required bytes that are needed for the action part of the
 * Verbs flow; in addition, return a bit-field with all the detected
 * actions, in order to avoid another iteration over the actions.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] action_flags
 *   Pointer to the detected actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
1191 flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
1192 uint64_t *action_flags)
1195 uint64_t detected_actions = 0;
1197 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1198 switch (actions->type) {
1199 case RTE_FLOW_ACTION_TYPE_VOID:
1201 case RTE_FLOW_ACTION_TYPE_FLAG:
1202 size += sizeof(struct ibv_flow_spec_action_tag);
1203 detected_actions |= MLX5_FLOW_ACTION_FLAG;
1205 case RTE_FLOW_ACTION_TYPE_MARK:
1206 size += sizeof(struct ibv_flow_spec_action_tag);
1207 detected_actions |= MLX5_FLOW_ACTION_MARK;
1209 case RTE_FLOW_ACTION_TYPE_DROP:
1210 size += sizeof(struct ibv_flow_spec_action_drop);
1211 detected_actions |= MLX5_FLOW_ACTION_DROP;
/* QUEUE and RSS add no Verbs spec bytes — only flags. */
1213 case RTE_FLOW_ACTION_TYPE_QUEUE:
1214 detected_actions |= MLX5_FLOW_ACTION_QUEUE;
1216 case RTE_FLOW_ACTION_TYPE_RSS:
1217 detected_actions |= MLX5_FLOW_ACTION_RSS;
1219 case RTE_FLOW_ACTION_TYPE_COUNT:
/* Counter spec only exists when counter sets are compiled in. */
1220 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
1221 size += sizeof(struct ibv_flow_spec_counter_action);
1223 detected_actions |= MLX5_FLOW_ACTION_COUNT;
1229 *action_flags = detected_actions;
1234 * Calculate the required bytes that are needed for the item part of the verbs
1235 * flow, in addtion returns bit-fields with all the detected action, in order to
1236 * avoid another interation over the actions.
1238 * @param[in] actions
1239 * Pointer to the list of items.
1240 * @param[in, out] item_flags
1241 * Pointer to the detected items.
1244 * The size of the memory needed for all items.
1247 flow_verbs_get_items_and_size(const struct rte_flow_item items[],
1248 uint64_t *item_flags)
1251 uint64_t detected_items = 0;
1252 const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
1254 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1255 switch (items->type) {
1256 case RTE_FLOW_ITEM_TYPE_VOID:
1258 case RTE_FLOW_ITEM_TYPE_ETH:
1259 size += sizeof(struct ibv_flow_spec_eth);
1260 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1261 MLX5_FLOW_LAYER_OUTER_L2;
1263 case RTE_FLOW_ITEM_TYPE_VLAN:
1264 size += sizeof(struct ibv_flow_spec_eth);
1265 detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1266 MLX5_FLOW_LAYER_OUTER_VLAN;
1268 case RTE_FLOW_ITEM_TYPE_IPV4:
1269 size += sizeof(struct ibv_flow_spec_ipv4_ext);
1270 detected_items |= tunnel ?
1271 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1272 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1274 case RTE_FLOW_ITEM_TYPE_IPV6:
1275 size += sizeof(struct ibv_flow_spec_ipv6);
1276 detected_items |= tunnel ?
1277 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1278 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1280 case RTE_FLOW_ITEM_TYPE_UDP:
1281 size += sizeof(struct ibv_flow_spec_tcp_udp);
1282 detected_items |= tunnel ?
1283 MLX5_FLOW_LAYER_INNER_L4_UDP :
1284 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1286 case RTE_FLOW_ITEM_TYPE_TCP:
1287 size += sizeof(struct ibv_flow_spec_tcp_udp);
1288 detected_items |= tunnel ?
1289 MLX5_FLOW_LAYER_INNER_L4_TCP :
1290 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1292 case RTE_FLOW_ITEM_TYPE_VXLAN:
1293 size += sizeof(struct ibv_flow_spec_tunnel);
1294 detected_items |= MLX5_FLOW_LAYER_VXLAN;
1296 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1297 size += sizeof(struct ibv_flow_spec_tunnel);
1298 detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
1300 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1301 case RTE_FLOW_ITEM_TYPE_GRE:
1302 size += sizeof(struct ibv_flow_spec_gre);
1303 detected_items |= MLX5_FLOW_LAYER_GRE;
1305 case RTE_FLOW_ITEM_TYPE_MPLS:
1306 size += sizeof(struct ibv_flow_spec_mpls);
1307 detected_items |= MLX5_FLOW_LAYER_MPLS;
1310 case RTE_FLOW_ITEM_TYPE_GRE:
1311 size += sizeof(struct ibv_flow_spec_tunnel);
1312 detected_items |= MLX5_FLOW_LAYER_TUNNEL;
1319 *item_flags = detected_items;
1324 * Internal preparation function. Allocate mlx5_flow with the required size.
1325 * The required size is calculate based on the actions and items. This function
1326 * also returns the detected actions and items for later use.
1329 * Pointer to the flow attributes.
1331 * Pointer to the list of items.
1332 * @param[in] actions
1333 * Pointer to the list of actions.
1334 * @param[out] item_flags
1335 * Pointer to bit mask of all items detected.
1336 * @param[out] action_flags
1337 * Pointer to bit mask of all actions detected.
1339 * Pointer to the error structure.
1342 * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1345 static struct mlx5_flow *
1346 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1347 const struct rte_flow_item items[],
1348 const struct rte_flow_action actions[],
1349 uint64_t *item_flags,
1350 uint64_t *action_flags,
1351 struct rte_flow_error *error)
1353 uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1354 struct mlx5_flow *flow;
1356 size += flow_verbs_get_actions_and_size(actions, action_flags);
1357 size += flow_verbs_get_items_and_size(items, item_flags);
1358 flow = rte_calloc(__func__, 1, size, 0);
1360 rte_flow_error_set(error, ENOMEM,
1361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1362 "not enough memory to create flow");
1365 flow->verbs.attr = (void *)(flow + 1);
1367 (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1372 * Fill the flow with verb spec.
1375 * Pointer to Ethernet device.
1376 * @param[in, out] dev_flow
1377 * Pointer to the mlx5 flow.
1379 * Pointer to the flow attributes.
1381 * Pointer to the list of items.
1382 * @param[in] actions
1383 * Pointer to the list of actions.
1385 * Pointer to the error structure.
1388 * 0 on success, else a negative errno value otherwise and rte_ernno is set.
1391 flow_verbs_translate(struct rte_eth_dev *dev,
1392 struct mlx5_flow *dev_flow,
1393 const struct rte_flow_attr *attr,
1394 const struct rte_flow_item items[],
1395 const struct rte_flow_action actions[],
1396 struct rte_flow_error *error)
1398 uint64_t action_flags = 0;
1399 uint64_t item_flags = 0;
1400 uint64_t priority = attr->priority;
1401 struct priv *priv = dev->data->dev_private;
1403 if (priority == MLX5_FLOW_PRIO_RSVD)
1404 priority = priv->config.flow_prio - 1;
1405 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1407 switch (actions->type) {
1408 case RTE_FLOW_ACTION_TYPE_VOID:
1410 case RTE_FLOW_ACTION_TYPE_FLAG:
1411 flow_verbs_translate_action_flag(actions,
1415 case RTE_FLOW_ACTION_TYPE_MARK:
1416 flow_verbs_translate_action_mark(actions,
1420 case RTE_FLOW_ACTION_TYPE_DROP:
1421 flow_verbs_translate_action_drop(&action_flags,
1424 case RTE_FLOW_ACTION_TYPE_QUEUE:
1425 flow_verbs_translate_action_queue(actions,
1429 case RTE_FLOW_ACTION_TYPE_RSS:
1430 flow_verbs_translate_action_rss(actions,
1434 case RTE_FLOW_ACTION_TYPE_COUNT:
1435 ret = flow_verbs_translate_action_count(dev,
1444 return rte_flow_error_set(error, ENOTSUP,
1445 RTE_FLOW_ERROR_TYPE_ACTION,
1447 "action not supported");
1450 dev_flow->flow->actions |= action_flags;
1451 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1452 switch (items->type) {
1453 case RTE_FLOW_ITEM_TYPE_VOID:
1455 case RTE_FLOW_ITEM_TYPE_ETH:
1456 flow_verbs_translate_item_eth(items, &item_flags,
1459 case RTE_FLOW_ITEM_TYPE_VLAN:
1460 flow_verbs_translate_item_vlan(items, &item_flags,
1463 case RTE_FLOW_ITEM_TYPE_IPV4:
1464 flow_verbs_translate_item_ipv4(items, &item_flags,
1467 case RTE_FLOW_ITEM_TYPE_IPV6:
1468 flow_verbs_translate_item_ipv6(items, &item_flags,
1471 case RTE_FLOW_ITEM_TYPE_UDP:
1472 flow_verbs_translate_item_udp(items, &item_flags,
1475 case RTE_FLOW_ITEM_TYPE_TCP:
1476 flow_verbs_translate_item_tcp(items, &item_flags,
1479 case RTE_FLOW_ITEM_TYPE_VXLAN:
1480 flow_verbs_translate_item_vxlan(items, &item_flags,
1483 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1484 flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
1487 case RTE_FLOW_ITEM_TYPE_GRE:
1488 flow_verbs_translate_item_gre(items, &item_flags,
1491 case RTE_FLOW_ITEM_TYPE_MPLS:
1492 flow_verbs_translate_item_mpls(items, &item_flags,
1496 return rte_flow_error_set(error, ENOTSUP,
1497 RTE_FLOW_ERROR_TYPE_ITEM,
1499 "item not supported");
1502 dev_flow->verbs.attr->priority =
1503 mlx5_flow_adjust_priority(dev, priority,
1504 dev_flow->verbs.attr->priority);
1509 * Remove the flow from the NIC but keeps it in memory.
1512 * Pointer to the Ethernet device structure.
1513 * @param[in, out] flow
1514 * Pointer to flow structure.
1517 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1519 struct mlx5_flow_verbs *verbs;
1520 struct mlx5_flow *dev_flow;
1524 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1525 verbs = &dev_flow->verbs;
1527 claim_zero(mlx5_glue->destroy_flow(verbs->flow));
1531 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1532 mlx5_hrxq_drop_release(dev);
1534 mlx5_hrxq_release(dev, verbs->hrxq);
1538 if (flow->counter) {
1539 flow_verbs_counter_release(flow->counter);
1540 flow->counter = NULL;
1545 * Remove the flow from the NIC and the memory.
1548 * Pointer to the Ethernet device structure.
1549 * @param[in, out] flow
1550 * Pointer to flow structure.
1553 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1555 struct mlx5_flow *dev_flow;
1559 flow_verbs_remove(dev, flow);
1560 while (!LIST_EMPTY(&flow->dev_flows)) {
1561 dev_flow = LIST_FIRST(&flow->dev_flows);
1562 LIST_REMOVE(dev_flow, next);
1568 * Apply the flow to the NIC.
1571 * Pointer to the Ethernet device structure.
1572 * @param[in, out] flow
1573 * Pointer to flow structure.
1575 * Pointer to error structure.
1578 * 0 on success, a negative errno value otherwise and rte_errno is set.
1581 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1582 struct rte_flow_error *error)
1584 struct mlx5_flow_verbs *verbs;
1585 struct mlx5_flow *dev_flow;
1588 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1589 verbs = &dev_flow->verbs;
1590 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1591 verbs->hrxq = mlx5_hrxq_drop_new(dev);
1595 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1596 "cannot get drop hash queue");
1600 struct mlx5_hrxq *hrxq;
1602 hrxq = mlx5_hrxq_get(dev, flow->key,
1603 MLX5_RSS_HASH_KEY_LEN,
1606 flow->rss.queue_num);
1608 hrxq = mlx5_hrxq_new(dev, flow->key,
1609 MLX5_RSS_HASH_KEY_LEN,
1612 flow->rss.queue_num,
1614 MLX5_FLOW_LAYER_TUNNEL));
1618 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1619 "cannot get hash queue");
1624 verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1627 rte_flow_error_set(error, errno,
1628 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1630 "hardware refuses to create flow");
1636 err = rte_errno; /* Save rte_errno before cleanup. */
1637 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1638 verbs = &dev_flow->verbs;
1640 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1641 mlx5_hrxq_drop_release(dev);
1643 mlx5_hrxq_release(dev, verbs->hrxq);
1647 rte_errno = err; /* Restore rte_errno. */
1651 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1652 .validate = flow_verbs_validate,
1653 .prepare = flow_verbs_prepare,
1654 .translate = flow_verbs_translate,
1655 .apply = flow_verbs_apply,
1656 .remove = flow_verbs_remove,
1657 .destroy = flow_verbs_destroy,