1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
5 #include <netinet/in.h>
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
40 * Pointer to the Ethernet device structure.
42 * Indicate if this counter is shared with other flows.
47 * A pointer to the counter, NULL otherwise and rte_errno is set.
/*
 * Get or create a flow counter: a shared counter with a matching id is
 * reused, otherwise a new Verbs counter set is created and the counter
 * is linked into the per-device list.
 */
49 static struct mlx5_flow_counter *
50 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
52 	struct priv *priv = dev->data->dev_private;
53 	struct mlx5_flow_counter *cnt;
/* First look for an already-allocated shared counter to reuse. */
55 	LIST_FOREACH(cnt, &priv->flow_counters, next) {
56 		if (!cnt->shared || cnt->shared != shared)
/* Verbs counter sets only exist when the provider supports them. */
63 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
65 	struct mlx5_flow_counter tmpl = {
68 		.cs = mlx5_glue->create_counter_set
70 			&(struct ibv_counter_set_init_attr){
/* Allocate the zero-initialized driver-side counter object. */
81 	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
/* Publish the new counter on the device list for later lookup. */
87 	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
95 * Release a flow counter.
98 * Pointer to the counter handler.
/*
 * Drop one reference on @p counter; when the last reference is gone,
 * destroy the underlying Verbs counter set and unlink the counter from
 * the per-device list.
 */
101 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
103 	if (--counter->ref_cnt == 0) {
/* claim_zero() asserts the glue call succeeded (returns 0). */
104 		claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
105 		LIST_REMOVE(counter, next);
111 * Add a verbs item specification into @p flow.
113 * @param[in, out] flow
114 * Pointer to flow structure.
116 * Create specification.
118 * Size in bytes of the specification to copy.
/*
 * Append @p size bytes of a Verbs specification at the current write
 * offset of the flow's spec buffer and account for it in the attribute
 * header. The caller guarantees the buffer has enough room.
 */
121 flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
123 	struct mlx5_flow_verbs *verbs = &flow->verbs;
/* Write position is the running total of previously added specs. */
128 	dst = (void *)(verbs->specs + verbs->size);
129 	memcpy(dst, src, size);
/* Verbs iterates specs using this count in the attribute header. */
130 	++verbs->attr->num_of_specs;
136 * Adjust verbs hash fields according to the @p flow information.
138 * @param[in] dev_flow.
139 * Pointer to dev flow structure.
141 * 1 when the hash field is for a tunnel item.
142 * @param[in] layer_types
144 * @param[in] hash_fields
/*
 * Accumulate RSS hash fields for the current layer into the dev_flow,
 * honoring inner/outer (tunnel) hashing when the provider supports it.
 */
148 flow_verbs_hashfields_adjust(struct mlx5_flow *dev_flow,
149 			int tunnel __rte_unused,
150 			uint32_t layer_types, uint64_t hash_fields)
152 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
/* rss.level >= 2 means the application asked for inner-header RSS. */
153 	int rss_request_inner = dev_flow->flow->rss.level >= 2;
155 	hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
156 	if (rss_request_inner && !tunnel)
158 	else if (rss_request_inner < 2 && tunnel)
/* Nothing to do if the flow's RSS types do not cover this layer. */
161 	if (!(dev_flow->flow->rss.types & layer_types))
/* Accumulate: several layers may each contribute hash fields. */
163 	dev_flow->verbs.hash_fields |= hash_fields;
167 * Convert the @p item into a Verbs specification. This function assumes that
168 * the input is valid and that there is space to insert the requested item
172 * Item specification.
173 * @param[in] item_flags
174 * Bit field with all detected items.
175 * @param[in, out] dev_flow
176 * Pointer to dev_flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_ETH item into a Verbs Ethernet spec
 * and append it to @p dev_flow. Masked-out value bits are cleared so
 * the hardware only matches on what the mask covers.
 *
 * Fix: restores "&eth" where HTML-entity decoding had mangled it into
 * the character "ð" ("&eth;" entity), which does not compile.
 */
179 flow_verbs_translate_item_eth(const struct rte_flow_item *item,
180 			      uint64_t *item_flags,
181 			      struct mlx5_flow *dev_flow)
183 	const struct rte_flow_item_eth *spec = item->spec;
184 	const struct rte_flow_item_eth *mask = item->mask;
185 	const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
186 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
187 	struct ibv_flow_spec_eth eth = {
188 		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow mask when none is provided. */
193 		mask = &rte_flow_item_eth_mask;
197 		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
198 		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
199 		eth.val.ether_type = spec->type;
200 		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
201 		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
202 		eth.mask.ether_type = mask->type;
203 		/* Remove unwanted bits from values. */
204 		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
205 			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
206 			eth.val.src_mac[i] &= eth.mask.src_mac[i];
208 		eth.val.ether_type &= eth.mask.ether_type;
209 		dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
211 	flow_verbs_spec_add(dev_flow, &eth, size);
/* Record the detected L2 layer (inner when under a tunnel). */
212 	*item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
213 				MLX5_FLOW_LAYER_OUTER_L2;
217 * Update the VLAN tag in the Verbs Ethernet specification.
218 * This function assumes that the input is valid and there is space to add
219 * the requested item.
221 * @param[in, out] attr
222 * Pointer to Verbs attributes structure.
224 * Verbs structure containing the VLAN information to copy.
/*
 * Patch the VLAN tag and ether type into an Ethernet spec already
 * present in @p attr, instead of appending a second eth spec.
 */
227 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
228 			struct ibv_flow_spec_eth *eth)
231 	const enum ibv_flow_spec_type search = eth->type;
/* Specs are laid out back to back right after the attr header. */
232 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
233 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
235 	for (i = 0; i != attr->num_of_specs; ++i) {
236 		if (hdr->type == search) {
237 			struct ibv_flow_spec_eth *e =
238 				(struct ibv_flow_spec_eth *)hdr;
/* Copy only the VLAN-related fields into the existing spec. */
240 			e->val.vlan_tag = eth->val.vlan_tag;
241 			e->mask.vlan_tag = eth->mask.vlan_tag;
242 			e->val.ether_type = eth->val.ether_type;
243 			e->mask.ether_type = eth->mask.ether_type;
/* Advance by the per-spec size stored in each spec header. */
246 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
251 * Convert the @p item into a Verbs specification. This function assumes that
252 * the input is valid and that there is space to insert the requested item
256 * Item specification.
257 * @param[in, out] item_flags
258 * Bit mask that holds all detected items.
259 * @param[in, out] dev_flow
260 * Pointer to dev_flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_VLAN item. VLAN matching is carried
 * by the Verbs Ethernet spec: if no L2 spec was added yet a new one is
 * appended, otherwise the existing one is updated in place.
 *
 * Fix: restores "&eth" where HTML-entity decoding had mangled it into
 * the character "ð" ("&eth;" entity), which does not compile.
 */
263 flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
264 			       uint64_t *item_flags,
265 			       struct mlx5_flow *dev_flow)
267 	const struct rte_flow_item_vlan *spec = item->spec;
268 	const struct rte_flow_item_vlan *mask = item->mask;
269 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
270 	const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
271 	struct ibv_flow_spec_eth eth = {
272 		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* L2 layer bit to test whether an eth spec already exists. */
275 	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
276 				      MLX5_FLOW_LAYER_OUTER_L2;
279 		mask = &rte_flow_item_vlan_mask;
281 		eth.val.vlan_tag = spec->tci;
282 		eth.mask.vlan_tag = mask->tci;
283 		eth.val.vlan_tag &= eth.mask.vlan_tag;
284 		eth.val.ether_type = spec->inner_type;
285 		eth.mask.ether_type = mask->inner_type;
286 		eth.val.ether_type &= eth.mask.ether_type;
288 	if (!(*item_flags & l2m)) {
289 		dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
290 		flow_verbs_spec_add(dev_flow, &eth, size);
292 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
293 		size = 0; /* Only an update is done in eth specification. */
295 	*item_flags |= tunnel ?
296 		       (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
297 		       (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
301 * Convert the @p item into a Verbs specification. This function assumes that
302 * the input is valid and that there is space to insert the requested item
306 * Item specification.
307 * @param[in, out] item_flags
308 * Bit mask that marks all detected items.
309 * @param[in, out] dev_flow
310 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_IPV4 item into a Verbs extended IPv4
 * spec, fold IPv4 layer types into the RSS hash fields, and append the
 * spec to @p dev_flow.
 */
313 flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
314 			       uint64_t *item_flags,
315 			       struct mlx5_flow *dev_flow)
317 	const struct rte_flow_item_ipv4 *spec = item->spec;
318 	const struct rte_flow_item_ipv4 *mask = item->mask;
319 	const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
320 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
321 	struct ibv_flow_spec_ipv4_ext ipv4 = {
322 		.type = IBV_FLOW_SPEC_IPV4_EXT |
323 			(tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow mask when none is provided. */
328 		mask = &rte_flow_item_ipv4_mask;
329 	*item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
330 				MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Addresses stay big-endian; Verbs expects network byte order. */
332 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
333 			.src_ip = spec->hdr.src_addr,
334 			.dst_ip = spec->hdr.dst_addr,
335 			.proto = spec->hdr.next_proto_id,
336 			.tos = spec->hdr.type_of_service,
338 		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
339 			.src_ip = mask->hdr.src_addr,
340 			.dst_ip = mask->hdr.dst_addr,
341 			.proto = mask->hdr.next_proto_id,
342 			.tos = mask->hdr.type_of_service,
344 		/* Remove unwanted bits from values. */
345 		ipv4.val.src_ip &= ipv4.mask.src_ip;
346 		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
347 		ipv4.val.proto &= ipv4.mask.proto;
348 		ipv4.val.tos &= ipv4.mask.tos;
/* Contribute IPv4 src/dst to RSS if the flow hashes on IPv4 types. */
350 	flow_verbs_hashfields_adjust(dev_flow, tunnel,
351 				     (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
352 				      ETH_RSS_NONFRAG_IPV4_TCP |
353 				      ETH_RSS_NONFRAG_IPV4_UDP |
354 				      ETH_RSS_NONFRAG_IPV4_OTHER),
355 				     (IBV_RX_HASH_SRC_IPV4 |
356 				      IBV_RX_HASH_DST_IPV4));
357 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
358 	flow_verbs_spec_add(dev_flow, &ipv4, size);
362 * Convert the @p item into a Verbs specification. This function assumes that
363 * the input is valid and that there is space to insert the requested item
367 * Item specification.
368 * @param[in, out] item_flags
369 * Bit mask that marks all detected items.
370 * @param[in, out] dev_flow
371 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_IPV6 item into a Verbs IPv6 spec,
 * splitting the vtc_flow word into flow label and traffic class, fold
 * IPv6 layer types into the RSS hash fields, and append the spec.
 */
374 flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
375 			       uint64_t *item_flags,
376 			       struct mlx5_flow *dev_flow)
378 	const struct rte_flow_item_ipv6 *spec = item->spec;
379 	const struct rte_flow_item_ipv6 *mask = item->mask;
/*
 * NOTE(review): tunnel is derived from flow->layers here while the
 * IPv4 translator uses *item_flags — confirm this is intentional.
 */
380 	const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
381 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
382 	struct ibv_flow_spec_ipv6 ipv6 = {
383 		.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow mask when none is provided. */
388 		mask = &rte_flow_item_ipv6_mask;
389 	*item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
390 				MLX5_FLOW_LAYER_OUTER_L3_IPV6;
393 		uint32_t vtc_flow_val;
394 		uint32_t vtc_flow_mask;
396 		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
397 		       RTE_DIM(ipv6.val.src_ip));
398 		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
399 		       RTE_DIM(ipv6.val.dst_ip));
400 		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
401 		       RTE_DIM(ipv6.mask.src_ip));
402 		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
403 		       RTE_DIM(ipv6.mask.dst_ip));
/* vtc_flow packs version, traffic class and flow label in one word. */
404 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
405 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
406 		ipv6.val.flow_label =
407 			rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
409 		ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
411 		ipv6.val.next_hdr = spec->hdr.proto;
412 		ipv6.val.hop_limit = spec->hdr.hop_limits;
413 		ipv6.mask.flow_label =
414 			rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
416 		ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
418 		ipv6.mask.next_hdr = mask->hdr.proto;
419 		ipv6.mask.hop_limit = mask->hdr.hop_limits;
420 		/* Remove unwanted bits from values. */
421 		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
422 			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
423 			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
425 		ipv6.val.flow_label &= ipv6.mask.flow_label;
426 		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
427 		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
428 		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
/* Contribute IPv6 src/dst to RSS if the flow hashes on IPv6 types. */
430 	flow_verbs_hashfields_adjust(dev_flow, tunnel,
431 				     (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
432 				      ETH_RSS_NONFRAG_IPV6_TCP |
433 				      ETH_RSS_NONFRAG_IPV6_UDP |
435 				      ETH_RSS_IPV6_TCP_EX |
436 				      ETH_RSS_IPV6_UDP_EX |
437 				      ETH_RSS_NONFRAG_IPV6_OTHER),
438 				     (IBV_RX_HASH_SRC_IPV6 |
439 				      IBV_RX_HASH_DST_IPV6));
440 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
441 	flow_verbs_spec_add(dev_flow, &ipv6, size);
445 * Convert the @p item into a Verbs specification. This function assumes that
446 * the input is valid and that there is space to insert the requested item
450 * Item specification.
451 * @param[in, out] item_flags
452 * Bit mask that marks all detected items.
453 * @param[in, out] dev_flow
454 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_UDP item into a Verbs TCP/UDP spec
 * (UDP flavor), contribute UDP ports to RSS and append the spec.
 */
457 flow_verbs_translate_item_udp(const struct rte_flow_item *item,
458 			      uint64_t *item_flags,
459 			      struct mlx5_flow *dev_flow)
461 	const struct rte_flow_item_udp *spec = item->spec;
462 	const struct rte_flow_item_udp *mask = item->mask;
463 	const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
464 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
465 	struct ibv_flow_spec_tcp_udp udp = {
466 		.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow mask when none is provided. */
471 		mask = &rte_flow_item_udp_mask;
472 	*item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
473 				MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* Ports stay big-endian; Verbs expects network byte order. */
475 		udp.val.dst_port = spec->hdr.dst_port;
476 		udp.val.src_port = spec->hdr.src_port;
477 		udp.mask.dst_port = mask->hdr.dst_port;
478 		udp.mask.src_port = mask->hdr.src_port;
479 		/* Remove unwanted bits from values. */
480 		udp.val.src_port &= udp.mask.src_port;
481 		udp.val.dst_port &= udp.mask.dst_port;
483 	flow_verbs_hashfields_adjust(dev_flow,
485 				     (IBV_RX_HASH_SRC_PORT_UDP |
486 				      IBV_RX_HASH_DST_PORT_UDP));
487 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
488 	flow_verbs_spec_add(dev_flow, &udp, size);
492 * Convert the @p item into a Verbs specification. This function assumes that
493 * the input is valid and that there is space to insert the requested item
497 * Item specification.
498 * @param[in, out] item_flags
499 * Bit mask that marks all detected items.
500 * @param[in, out] dev_flow
501 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_TCP item into a Verbs TCP/UDP spec
 * (TCP flavor), contribute TCP ports to RSS and append the spec.
 */
504 flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
505 			      uint64_t *item_flags,
506 			      struct mlx5_flow *dev_flow)
508 	const struct rte_flow_item_tcp *spec = item->spec;
509 	const struct rte_flow_item_tcp *mask = item->mask;
/*
 * NOTE(review): tunnel is derived from flow->layers here while the
 * UDP translator uses *item_flags — confirm this is intentional.
 */
510 	const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
511 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
512 	struct ibv_flow_spec_tcp_udp tcp = {
513 		.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
/* Fall back to the default rte_flow mask when none is provided. */
518 		mask = &rte_flow_item_tcp_mask;
519 	*item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
520 				MLX5_FLOW_LAYER_OUTER_L4_TCP;
/* Ports stay big-endian; Verbs expects network byte order. */
522 		tcp.val.dst_port = spec->hdr.dst_port;
523 		tcp.val.src_port = spec->hdr.src_port;
524 		tcp.mask.dst_port = mask->hdr.dst_port;
525 		tcp.mask.src_port = mask->hdr.src_port;
526 		/* Remove unwanted bits from values. */
527 		tcp.val.src_port &= tcp.mask.src_port;
528 		tcp.val.dst_port &= tcp.mask.dst_port;
530 	flow_verbs_hashfields_adjust(dev_flow,
532 				     (IBV_RX_HASH_SRC_PORT_TCP |
533 				      IBV_RX_HASH_DST_PORT_TCP));
534 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
535 	flow_verbs_spec_add(dev_flow, &tcp, size);
539 * Convert the @p item into a Verbs specification. This function assumes that
540 * the input is valid and that there is space to insert the requested item
544 * Item specification.
545 * @param[in, out] item_flags
546 * Bit mask that marks all detected items.
547 * @param[in, out] dev_flow
548 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_VXLAN item into a Verbs tunnel spec,
 * packing the 24-bit VNI into a 32-bit tunnel id via a local union.
 */
551 flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
552 				uint64_t *item_flags,
553 				struct mlx5_flow *dev_flow)
555 	const struct rte_flow_item_vxlan *spec = item->spec;
556 	const struct rte_flow_item_vxlan *mask = item->mask;
557 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
558 	struct ibv_flow_spec_tunnel vxlan = {
559 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Union overlays the 3-byte vni on a 32-bit id (byte 0 stays zero). */
565 	} id = { .vlan_id = 0, };
/* Fall back to the default rte_flow mask when none is provided. */
568 		mask = &rte_flow_item_vxlan_mask;
570 		memcpy(&id.vni[1], spec->vni, 3);
571 		vxlan.val.tunnel_id = id.vlan_id;
572 		memcpy(&id.vni[1], mask->vni, 3);
573 		vxlan.mask.tunnel_id = id.vlan_id;
574 		/* Remove unwanted bits from values. */
575 		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
577 	flow_verbs_spec_add(dev_flow, &vxlan, size);
578 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
579 	*item_flags |= MLX5_FLOW_LAYER_VXLAN;
583 * Convert the @p item into a Verbs specification. This function assumes that
584 * the input is valid and that there is space to insert the requested item
588 * Item specification.
589 * @param[in, out] item_flags
590 * Bit mask that marks all detected items.
591 * @param[in, out] dev_flow
592 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_VXLAN_GPE item into a Verbs tunnel
 * spec. Same VNI-packing scheme as the plain VXLAN translator; the
 * Verbs spec type is the generic VXLAN tunnel type.
 */
595 flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
596 				    uint64_t *item_flags,
597 				    struct mlx5_flow *dev_flow)
599 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
600 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
601 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
602 	struct ibv_flow_spec_tunnel vxlan_gpe = {
603 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Union overlays the 3-byte vni on a 32-bit id (byte 0 stays zero). */
609 	} id = { .vlan_id = 0, };
/* Fall back to the default rte_flow mask when none is provided. */
612 		mask = &rte_flow_item_vxlan_gpe_mask;
614 		memcpy(&id.vni[1], spec->vni, 3);
615 		vxlan_gpe.val.tunnel_id = id.vlan_id;
616 		memcpy(&id.vni[1], mask->vni, 3);
617 		vxlan_gpe.mask.tunnel_id = id.vlan_id;
618 		/* Remove unwanted bits from values. */
619 		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
621 	flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
622 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
623 	*item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
627 * Update the protocol in Verbs IPv4/IPv6 spec.
629 * @param[in, out] attr
630 * Pointer to Verbs attributes structure.
632 * Specification type to search in order to update the IP protocol.
633 * @param[in] protocol
634 * Protocol value to set if none is present in the specification.
/*
 * Walk the spec list of @p attr looking for the IPv4/IPv6 spec of type
 * @p search and, only when the user did not already match on an IP
 * protocol, force it to @p protocol (GRE) with a full mask.
 */
637 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
638 				       enum ibv_flow_spec_type search,
/* Specs are laid out back to back right after the attr header. */
642 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
643 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
647 	for (i = 0; i != attr->num_of_specs; ++i) {
648 		if (hdr->type == search) {
650 			struct ibv_flow_spec_ipv4_ext *ipv4;
651 			struct ibv_flow_spec_ipv6 *ipv6;
655 			case IBV_FLOW_SPEC_IPV4_EXT:
656 				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
/* Only set the protocol when none was specified by the user. */
657 				if (!ip.ipv4->val.proto) {
658 					ip.ipv4->val.proto = protocol;
659 					ip.ipv4->mask.proto = 0xff;
662 			case IBV_FLOW_SPEC_IPV6:
663 				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
664 				if (!ip.ipv6->val.next_hdr) {
665 					ip.ipv6->val.next_hdr = protocol;
666 					ip.ipv6->mask.next_hdr = 0xff;
/* Advance by the per-spec size stored in each spec header. */
674 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
679 * Convert the @p item into a Verbs specification. This function assumes that
680 * the input is valid and that there is space to insert the requested item
684 * Item specification.
685 * @param[in, out] item_flags
686 * Bit mask that marks all detected items.
687 * @param[in, out] dev_flow
688 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_GRE item. Without provider MPLS/GRE
 * support, GRE is represented by a generic Verbs tunnel spec; with it,
 * a real GRE spec matching flags, protocol and key is built. In both
 * cases the preceding IP spec's protocol is forced to GRE if unset.
 */
691 flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
692 			      uint64_t *item_flags,
693 			      struct mlx5_flow *dev_flow)
695 	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
/* Older providers: fall back to an opaque tunnel specification. */
696 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
697 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
698 	struct ibv_flow_spec_tunnel tunnel = {
699 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
703 	const struct rte_flow_item_gre *spec = item->spec;
704 	const struct rte_flow_item_gre *mask = item->mask;
705 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
706 	struct ibv_flow_spec_gre tunnel = {
707 		.type = IBV_FLOW_SPEC_GRE,
/* Fall back to the default rte_flow mask when none is provided. */
712 		mask = &rte_flow_item_gre_mask;
714 		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
715 		tunnel.val.protocol = spec->protocol;
716 		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
717 		tunnel.mask.protocol = mask->protocol;
718 		/* Remove unwanted bits from values. */
719 		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
720 		tunnel.val.protocol &= tunnel.mask.protocol;
721 		tunnel.val.key &= tunnel.mask.key;
/* Make the outer IP spec match IPPROTO_GRE when left unspecified. */
724 	if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
725 		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
726 						       IBV_FLOW_SPEC_IPV4_EXT,
729 		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
732 	flow_verbs_spec_add(dev_flow, &tunnel, size);
733 	verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
734 	*item_flags |= MLX5_FLOW_LAYER_GRE;
738 * Convert the @p action into a Verbs specification. This function assumes that
739 * the input is valid and that there is space to insert the requested action
740 * into the flow. This function also return the action that was added.
743 * Item specification.
744 * @param[in, out] item_flags
745 * Bit mask that marks all detected items.
746 * @param[in, out] dev_flow
747 * Pointer to specific flow structure.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_MPLS item into a Verbs MPLS spec
 * (no-op unless the provider supports MPLS).
 * NOTE(review): the second parameter is named action_flags but it
 * receives MLX5_FLOW_LAYER_MPLS, i.e. an item/layer flag — the name
 * looks like a copy/paste slip; confirm against callers.
 */
750 flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
751 			       uint64_t *action_flags __rte_unused,
752 			       struct mlx5_flow *dev_flow __rte_unused)
754 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
755 	const struct rte_flow_item_mpls *spec = item->spec;
756 	const struct rte_flow_item_mpls *mask = item->mask;
757 	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
758 	struct ibv_flow_spec_mpls mpls = {
759 		.type = IBV_FLOW_SPEC_MPLS,
/* Fall back to the default rte_flow mask when none is provided. */
764 		mask = &rte_flow_item_mpls_mask;
/* The MPLS item is a raw 4-byte label stack entry; copy it whole. */
766 		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
767 		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
768 		/* Remove unwanted bits from values. */
769 		mpls.val.label &= mpls.mask.label;
771 	flow_verbs_spec_add(dev_flow, &mpls, size);
772 	dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
773 	*action_flags |= MLX5_FLOW_LAYER_MPLS;
778 * Convert the @p action into a Verbs specification. This function assumes that
779 * the input is valid and that there is space to insert the requested action
780 * into the flow. This function also return the action that was added.
782 * @param[in, out] action_flags
783 * Pointer to the detected actions.
784 * @param[in] dev_flow
785 * Pointer to mlx5_flow.
/*
 * Append a Verbs DROP action spec to @p dev_flow and record the action
 * in @p action_flags.
 */
788 flow_verbs_translate_action_drop(uint64_t *action_flags,
789 				 struct mlx5_flow *dev_flow)
791 	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
792 	struct ibv_flow_spec_action_drop drop = {
793 			.type = IBV_FLOW_SPEC_ACTION_DROP,
797 	flow_verbs_spec_add(dev_flow, &drop, size);
798 	*action_flags |= MLX5_FLOW_ACTION_DROP;
802 * Convert the @p action into a Verbs specification. This function assumes that
803 * the input is valid and that there is space to insert the requested action
804 * into the flow. This function also return the action that was added.
807 * Action configuration.
808 * @param[in, out] action_flags
809 * Pointer to the detected actions.
810 * @param[in] dev_flow
811 * Pointer to mlx5_flow.
/*
 * Record a QUEUE action: steer the flow to a single Rx queue by
 * storing its index in the flow's queue array.
 */
814 flow_verbs_translate_action_queue(const struct rte_flow_action *action,
815 				  uint64_t *action_flags,
816 				  struct mlx5_flow *dev_flow)
818 	const struct rte_flow_action_queue *queue = action->conf;
819 	struct rte_flow *flow = dev_flow->flow;
/* A queue action is a one-entry "RSS" over a single queue. */
822 		(*flow->queue)[0] = queue->index;
823 	flow->rss.queue_num = 1;
824 	*action_flags |= MLX5_FLOW_ACTION_QUEUE;
828 * Convert the @p action into a Verbs specification. This function assumes that
829 * the input is valid and that there is space to insert the requested action
830 * into the flow. This function also return the action that was added.
833 * Action configuration.
834 * @param[in, out] action_flags
835 * Pointer to the detected actions.
836 * @param[in] dev_flow
837 * Pointer to mlx5_flow.
/*
 * Record an RSS action: copy the queue list, hash key, hash types and
 * tunnel level from the action configuration into the flow.
 */
840 flow_verbs_translate_action_rss(const struct rte_flow_action *action,
841 				uint64_t *action_flags,
842 				struct mlx5_flow *dev_flow)
844 	const struct rte_flow_action_rss *rss = action->conf;
845 	struct rte_flow *flow = dev_flow->flow;
848 		memcpy((*flow->queue), rss->queue,
849 		       rss->queue_num * sizeof(uint16_t));
850 	flow->rss.queue_num = rss->queue_num;
/* Key length is fixed at MLX5_RSS_HASH_KEY_LEN for this device. */
851 	memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
852 	flow->rss.types = rss->types;
/* level >= 2 requests hashing on inner (tunneled) headers. */
853 	flow->rss.level = rss->level;
854 	*action_flags |= MLX5_FLOW_ACTION_RSS;
858 * Convert the @p action into a Verbs specification. This function assumes that
859 * the input is valid and that there is space to insert the requested action
860 * into the flow. This function also return the action that was added.
863 * Action configuration.
864 * @param[in, out] action_flags
865 * Pointer to the detected actions.
866 * @param[in] dev_flow
867 * Pointer to mlx5_flow.
/*
 * Append a Verbs TAG action carrying the default mark value; FLAG is
 * implemented as a MARK with MLX5_FLOW_MARK_DEFAULT.
 */
870 flow_verbs_translate_action_flag
871 			(const struct rte_flow_action *action __rte_unused,
872 			 uint64_t *action_flags,
873 			 struct mlx5_flow *dev_flow)
875 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
876 	struct ibv_flow_spec_action_tag tag = {
877 		.type = IBV_FLOW_SPEC_ACTION_TAG,
/* mlx5_flow_mark_set() encodes the mark id for the hardware. */
879 		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
881 	*action_flags |= MLX5_FLOW_ACTION_MARK;
882 	flow_verbs_spec_add(dev_flow, &tag, size);
886 * Update verbs specification to modify the flag to mark.
888 * @param[in, out] verbs
889 * Pointer to the mlx5_flow_verbs structure.
891 * Mark identifier to replace the flag.
/*
 * Rewrite the tag id of an already-appended TAG spec so a FLAG action
 * is upgraded to a MARK with @p mark_id, without adding a second spec.
 */
894 flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
896 	struct ibv_spec_header *hdr;
901 		/* Update Verbs specification. */
902 		hdr = (struct ibv_spec_header *)verbs->specs;
905 		for (i = 0; i != verbs->attr->num_of_specs; ++i) {
906 			if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
907 				struct ibv_flow_spec_action_tag *t =
908 					(struct ibv_flow_spec_action_tag *)hdr;
910 				t->tag_id = mlx5_flow_mark_set(mark_id);
/* Advance by the per-spec size stored in each spec header. */
912 			hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
917 * Convert the @p action into a Verbs specification. This function assumes that
918 * the input is valid and that there is space to insert the requested action
919 * into the flow. This function also return the action that was added.
922 * Action configuration.
923 * @param[in, out] action_flags
924 * Pointer to the detected actions.
925 * @param[in] dev_flow
926 * Pointer to mlx5_flow.
/*
 * Record a MARK action. If a FLAG action was already translated, its
 * existing TAG spec is updated in place; otherwise a new TAG spec
 * carrying the encoded mark id is appended.
 */
929 flow_verbs_translate_action_mark(const struct rte_flow_action *action,
930 				 uint64_t *action_flags,
931 				 struct mlx5_flow *dev_flow)
933 	const struct rte_flow_action_mark *mark = action->conf;
934 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
935 	struct ibv_flow_spec_action_tag tag = {
936 		.type = IBV_FLOW_SPEC_ACTION_TAG,
939 	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
/* FLAG already appended a TAG spec — just rewrite its id. */
941 	if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
942 		flow_verbs_mark_update(verbs, mark->id);
945 		tag.tag_id = mlx5_flow_mark_set(mark->id);
946 		flow_verbs_spec_add(dev_flow, &tag, size);
948 	*action_flags |= MLX5_FLOW_ACTION_MARK;
952 * Convert the @p action into a Verbs specification. This function assumes that
953 * the input is valid and that there is space to insert the requested action
954 * into the flow. This function also return the action that was added.
957 * Pointer to the Ethernet device structure.
959 * Action configuration.
960 * @param[in, out] action_flags
961 * Pointer to the detected actions.
962 * @param[in] dev_flow
963 * Pointer to mlx5_flow.
965 * Pointer to error structure.
968 * 0 On success else a negative errno value is returned and rte_errno is set.
/*
 * Record a COUNT action: attach a (possibly shared) flow counter to
 * the flow, and when counter sets are supported append the matching
 * Verbs counter spec. Returns 0 on success, negative errno on failure
 * with rte_errno set.
 */
971 flow_verbs_translate_action_count(struct rte_eth_dev *dev,
972 				  const struct rte_flow_action *action,
973 				  uint64_t *action_flags,
974 				  struct mlx5_flow *dev_flow,
975 				  struct rte_flow_error *error)
977 	const struct rte_flow_action_count *count = action->conf;
978 	struct rte_flow *flow = dev_flow->flow;
979 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
980 	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
981 	struct ibv_flow_spec_counter_action counter = {
982 		.type = IBV_FLOW_SPEC_ACTION_COUNT,
/* Create/look up the counter only once per flow. */
987 	if (!flow->counter) {
988 		flow->counter = flow_verbs_counter_new(dev, count->shared,
/* flow_verbs_counter_new() sets rte_errno on failure. */
991 			return rte_flow_error_set(error, rte_errno,
992 						  RTE_FLOW_ERROR_TYPE_ACTION,
997 	*action_flags |= MLX5_FLOW_ACTION_COUNT;
998 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
999 	counter.counter_set_handle = flow->counter->cs->handle;
1000 	flow_verbs_spec_add(dev_flow, &counter, size);
1006 * Internal validation function. For validating both actions and items.
1009 * Pointer to the Ethernet device structure.
1011 * Pointer to the flow attributes.
1013 * Pointer to the list of items.
1014 * @param[in] actions
1015 * Pointer to the list of actions.
1017 * Pointer to the error structure.
1020 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate attributes, then every item and every action of the flow
 * rule against what the Verbs engine supports, accumulating item and
 * action bit-fields as it goes. Tracks the L3 next-protocol so L4 and
 * tunnel items can be cross-checked. Returns 0 on success, a negative
 * errno (via rte_flow_error_set) otherwise.
 */
1023 flow_verbs_validate(struct rte_eth_dev *dev,
1024 		    const struct rte_flow_attr *attr,
1025 		    const struct rte_flow_item items[],
1026 		    const struct rte_flow_action actions[],
1027 		    struct rte_flow_error *error)
1030 	uint32_t action_flags = 0;
1031 	uint32_t item_flags = 0;
/* 0xff means "no protocol constraint from a previous L3 item". */
1033 	uint8_t next_protocol = 0xff;
1037 	ret = mlx5_flow_validate_attributes(dev, attr, error);
/* Pass 1: validate the pattern items. */
1040 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1042 		switch (items->type) {
1043 		case RTE_FLOW_ITEM_TYPE_VOID:
1045 		case RTE_FLOW_ITEM_TYPE_ETH:
1046 			ret = mlx5_flow_validate_item_eth(items, item_flags,
1050 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1051 					       MLX5_FLOW_LAYER_OUTER_L2;
1053 		case RTE_FLOW_ITEM_TYPE_VLAN:
1054 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
1058 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1059 					       MLX5_FLOW_LAYER_OUTER_VLAN;
1061 		case RTE_FLOW_ITEM_TYPE_IPV4:
1062 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1066 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1067 					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Remember the matched L3 protocol for later L4/tunnel checks. */
1068 			if (items->mask != NULL &&
1069 			    ((const struct rte_flow_item_ipv4 *)
1070 			     items->mask)->hdr.next_proto_id)
1072 					((const struct rte_flow_item_ipv4 *)
1073 					 (items->spec))->hdr.next_proto_id;
1075 		case RTE_FLOW_ITEM_TYPE_IPV6:
1076 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1080 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1081 					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1082 			if (items->mask != NULL &&
1083 			    ((const struct rte_flow_item_ipv6 *)
1084 			     items->mask)->hdr.proto)
1086 					((const struct rte_flow_item_ipv6 *)
1087 					 items->spec)->hdr.proto;
1089 		case RTE_FLOW_ITEM_TYPE_UDP:
1090 			ret = mlx5_flow_validate_item_udp(items, item_flags,
1095 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1096 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1098 		case RTE_FLOW_ITEM_TYPE_TCP:
1099 			ret = mlx5_flow_validate_item_tcp(items, item_flags,
1100 							  next_protocol, error);
1103 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1104 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
1106 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1107 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1111 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1113 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1114 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
1119 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1121 		case RTE_FLOW_ITEM_TYPE_GRE:
1122 			ret = mlx5_flow_validate_item_gre(items, item_flags,
1123 							  next_protocol, error);
1126 			item_flags |= MLX5_FLOW_LAYER_GRE;
1128 		case RTE_FLOW_ITEM_TYPE_MPLS:
1129 			ret = mlx5_flow_validate_item_mpls(items, item_flags,
/* MPLS only makes sense when the L3 protocol allows it. */
1134 			if (next_protocol != 0xff &&
1135 			    next_protocol != IPPROTO_MPLS)
1136 				return rte_flow_error_set
1138 					 RTE_FLOW_ERROR_TYPE_ITEM, items,
1139 					 "protocol filtering not compatible"
1140 					 " with MPLS layer");
1141 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1144 			return rte_flow_error_set(error, ENOTSUP,
1145 						  RTE_FLOW_ERROR_TYPE_ITEM,
1146 						  NULL, "item not supported");
/* Pass 2: validate the actions. */
1149 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1150 		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1151 		switch (actions->type) {
1152 		case RTE_FLOW_ACTION_TYPE_VOID:
1154 		case RTE_FLOW_ACTION_TYPE_FLAG:
1155 			ret = mlx5_flow_validate_action_flag(action_flags,
1159 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1161 		case RTE_FLOW_ACTION_TYPE_MARK:
1162 			ret = mlx5_flow_validate_action_mark(actions,
1167 			action_flags |= MLX5_FLOW_ACTION_MARK;
1169 		case RTE_FLOW_ACTION_TYPE_DROP:
1170 			ret = mlx5_flow_validate_action_drop(action_flags,
1174 			action_flags |= MLX5_FLOW_ACTION_DROP;
1176 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1177 			ret = mlx5_flow_validate_action_queue(actions,
1182 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1184 		case RTE_FLOW_ACTION_TYPE_RSS:
1185 			ret = mlx5_flow_validate_action_rss(actions,
1190 			action_flags |= MLX5_FLOW_ACTION_RSS;
1192 		case RTE_FLOW_ACTION_TYPE_COUNT:
1193 			ret = mlx5_flow_validate_action_count(dev, error);
1196 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1199 			return rte_flow_error_set(error, ENOTSUP,
1200 						  RTE_FLOW_ERROR_TYPE_ACTION,
1202 						  "action not supported");
/* Every flow must carry exactly one fate action (queue/rss/drop). */
1205 	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1206 		return rte_flow_error_set(error, EINVAL,
1207 					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
1208 					  "no fate action is found");
1213 * Calculate the required bytes that are needed for the action part of the verbs
1214 * flow, in addition returns bit-fields with all the detected actions, in order to
1215 * avoid another iteration over the actions.
1217 * @param[in] actions
1218 * Pointer to the list of actions.
1219 * @param[out] action_flags
1220 * Pointer to the detected actions.
1223 * The size of the memory needed for all actions.
1226 flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
1227 				uint64_t *action_flags)
/*
 * Walks the action list once, summing the verbs spec size each action will
 * need and recording every action seen in a local bit-field which is stored
 * through *action_flags at the end.
 * NOTE(review): this extract appears to elide several original lines (the
 * opening brace, the `size` declaration, per-case `break`s, the #endif of
 * the counters guard and the final `return size;`) — verify against the
 * full file before relying on exact control flow.
 */
1230 	uint64_t detected_actions = 0;
1232 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1233 		switch (actions->type) {
1234 		case RTE_FLOW_ACTION_TYPE_VOID:
		/* VOID consumes no spec space and sets no flag. */
1236 		case RTE_FLOW_ACTION_TYPE_FLAG:
1237 			size += sizeof(struct ibv_flow_spec_action_tag);
1238 			detected_actions |= MLX5_FLOW_ACTION_FLAG;
1240 		case RTE_FLOW_ACTION_TYPE_MARK:
1241 			size += sizeof(struct ibv_flow_spec_action_tag);
1242 			detected_actions |= MLX5_FLOW_ACTION_MARK;
1244 		case RTE_FLOW_ACTION_TYPE_DROP:
1245 			size += sizeof(struct ibv_flow_spec_action_drop);
1246 			detected_actions |= MLX5_FLOW_ACTION_DROP;
		/* QUEUE and RSS are realized through the hash Rx queue, not a
		 * verbs spec, so they add no size — only a flag. */
1248 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1249 			detected_actions |= MLX5_FLOW_ACTION_QUEUE;
1251 		case RTE_FLOW_ACTION_TYPE_RSS:
1252 			detected_actions |= MLX5_FLOW_ACTION_RSS;
1254 		case RTE_FLOW_ACTION_TYPE_COUNT:
1255 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		/* Counter spec space is only needed when the provider
		 * supports counter sets. */
1256 			size += sizeof(struct ibv_flow_spec_counter_action);
1258 			detected_actions |= MLX5_FLOW_ACTION_COUNT;
1264 	*action_flags = detected_actions;
1269  * Calculate the required bytes that are needed for the item part of the verbs
1270  * flow, in addition returns bit-fields with all the detected items, in order to
1271  * avoid another iteration over the items.
1273  * @param[in] items
1274  *   Pointer to the list of items.
1275 * @param[in, out] item_flags
1276 * Pointer to the detected items.
1279 * The size of the memory needed for all items.
1282 flow_verbs_get_items_and_size(const struct rte_flow_item items[],
1283 				uint64_t *item_flags)
/*
 * Walks the item list once, summing the verbs spec size each item will need
 * and recording every layer seen; the result is stored through *item_flags.
 * The incoming *item_flags is read to decide whether layers count as inner
 * (tunnel already detected) or outer.
 * NOTE(review): this extract appears to elide several original lines (the
 * opening brace, the `size` declaration, per-case `break`s and the final
 * `return size;`) — verify against the full file.
 */
1286 	uint64_t detected_items = 0;
/* Non-zero once a tunnel layer was already matched by the caller. */
1287 	const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
1289 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1290 		switch (items->type) {
1291 		case RTE_FLOW_ITEM_TYPE_VOID:
1293 		case RTE_FLOW_ITEM_TYPE_ETH:
1294 			size += sizeof(struct ibv_flow_spec_eth);
1295 			detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1296 						   MLX5_FLOW_LAYER_OUTER_L2;
1298 		case RTE_FLOW_ITEM_TYPE_VLAN:
/* VLAN matching reuses the Ethernet spec (VLAN fields live there). */
1299 			size += sizeof(struct ibv_flow_spec_eth);
1300 			detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1301 						   MLX5_FLOW_LAYER_OUTER_VLAN;
1303 		case RTE_FLOW_ITEM_TYPE_IPV4:
1304 			size += sizeof(struct ibv_flow_spec_ipv4_ext);
1305 			detected_items |= tunnel ?
1306 					  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1307 					  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1309 		case RTE_FLOW_ITEM_TYPE_IPV6:
1310 			size += sizeof(struct ibv_flow_spec_ipv6);
1311 			detected_items |= tunnel ?
1312 					  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1313 					  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1315 		case RTE_FLOW_ITEM_TYPE_UDP:
1316 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1317 			detected_items |= tunnel ?
1318 					  MLX5_FLOW_LAYER_INNER_L4_UDP :
1319 					  MLX5_FLOW_LAYER_OUTER_L4_UDP;
1321 		case RTE_FLOW_ITEM_TYPE_TCP:
/* TCP and UDP share the same verbs L4 spec structure. */
1322 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1323 			detected_items |= tunnel ?
1324 					  MLX5_FLOW_LAYER_INNER_L4_TCP :
1325 					  MLX5_FLOW_LAYER_OUTER_L4_TCP;
1327 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1328 			size += sizeof(struct ibv_flow_spec_tunnel);
1329 			detected_items |= MLX5_FLOW_LAYER_VXLAN;
1331 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1332 			size += sizeof(struct ibv_flow_spec_tunnel);
1333 			detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
1335 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
/* With MPLS support, GRE gets a dedicated spec that can match the
 * GRE header fields; MPLS gets its own spec as well. */
1336 		case RTE_FLOW_ITEM_TYPE_GRE:
1337 			size += sizeof(struct ibv_flow_spec_gre);
1338 			detected_items |= MLX5_FLOW_LAYER_GRE;
1340 		case RTE_FLOW_ITEM_TYPE_MPLS:
1341 			size += sizeof(struct ibv_flow_spec_mpls);
1342 			detected_items |= MLX5_FLOW_LAYER_MPLS;
/* NOTE(review): this second GRE case is presumably under the #else
 * branch of HAVE_IBV_DEVICE_MPLS_SUPPORT (falling back to a generic
 * tunnel spec); the #else/#endif lines appear elided here. */
1345 		case RTE_FLOW_ITEM_TYPE_GRE:
1346 			size += sizeof(struct ibv_flow_spec_tunnel);
1347 			detected_items |= MLX5_FLOW_LAYER_TUNNEL;
1354 	*item_flags = detected_items;
1359 * Internal preparation function. Allocate mlx5_flow with the required size.
1360  * The required size is calculated based on the actions and items. This function
1361 * also returns the detected actions and items for later use.
1364 * Pointer to the flow attributes.
1366 * Pointer to the list of items.
1367 * @param[in] actions
1368 * Pointer to the list of actions.
1369 * @param[out] item_flags
1370 * Pointer to bit mask of all items detected.
1371 * @param[out] action_flags
1372 * Pointer to bit mask of all actions detected.
1374 * Pointer to the error structure.
1377 * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1380 static struct mlx5_flow *
1381 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1382 		   const struct rte_flow_item items[],
1383 		   const struct rte_flow_action actions[],
1384 		   uint64_t *item_flags,
1385 		   uint64_t *action_flags,
1386 		   struct rte_flow_error *error)
/*
 * Allocates one contiguous chunk laid out as:
 *   [struct mlx5_flow][struct ibv_flow_attr][verbs specs...]
 * sized from a single pass over actions and items; the detected
 * action/item bit-fields are reported back to the caller so the
 * translation step does not have to iterate again.
 * NOTE(review): the `if (!flow)` guard around the error path and the
 * final `return flow;` appear elided from this extract.
 */
1388 	uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1389 	struct mlx5_flow *flow;
1391 	size += flow_verbs_get_actions_and_size(actions, action_flags);
1392 	size += flow_verbs_get_items_and_size(items, item_flags);
/* rte_calloc zero-initializes the whole chunk, including the attr. */
1393 	flow = rte_calloc(__func__, 1, size, 0);
1395 		rte_flow_error_set(error, ENOMEM,
1396 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1397 				   "not enough memory to create flow");
/* The ibv_flow_attr sits immediately after the mlx5_flow struct,
 * and the spec area starts right after the attr. */
1400 	flow->verbs.attr = (void *)(flow + 1);
1402 		(uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1407 * Fill the flow with verb spec.
1410 * Pointer to Ethernet device.
1411 * @param[in, out] dev_flow
1412 * Pointer to the mlx5 flow.
1414 * Pointer to the flow attributes.
1416 * Pointer to the list of items.
1417 * @param[in] actions
1418 * Pointer to the list of actions.
1420 * Pointer to the error structure.
1423  *   0 on success, a negative errno value otherwise and rte_errno is set.
1426 flow_verbs_translate(struct rte_eth_dev *dev,
1427 		     struct mlx5_flow *dev_flow,
1428 		     const struct rte_flow_attr *attr,
1429 		     const struct rte_flow_item items[],
1430 		     const struct rte_flow_action actions[],
1431 		     struct rte_flow_error *error)
/*
 * Fills the pre-allocated verbs attr/specs (see flow_verbs_prepare) by
 * translating each action, then each item, into its verbs spec.  The
 * resulting action bit-field is merged into the parent flow, and the
 * verbs priority is adjusted last.
 * NOTE(review): per-case argument lists, `break`s and braces appear
 * elided from this extract — verify exact call arguments in the full
 * file.
 */
1433 	uint64_t action_flags = 0;
1434 	uint64_t item_flags = 0;
1435 	uint64_t priority = attr->priority;
1436 	struct priv *priv = dev->data->dev_private;
/* MLX5_FLOW_PRIO_RSVD means "use the lowest configured priority". */
1438 	if (priority == MLX5_FLOW_PRIO_RSVD)
1439 		priority = priv->config.flow_prio - 1;
1440 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1442 		switch (actions->type) {
1443 		case RTE_FLOW_ACTION_TYPE_VOID:
1445 		case RTE_FLOW_ACTION_TYPE_FLAG:
1446 			flow_verbs_translate_action_flag(actions,
1450 		case RTE_FLOW_ACTION_TYPE_MARK:
1451 			flow_verbs_translate_action_mark(actions,
1455 		case RTE_FLOW_ACTION_TYPE_DROP:
1456 			flow_verbs_translate_action_drop(&action_flags,
1459 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1460 			flow_verbs_translate_action_queue(actions,
1464 		case RTE_FLOW_ACTION_TYPE_RSS:
1465 			flow_verbs_translate_action_rss(actions,
1469 		case RTE_FLOW_ACTION_TYPE_COUNT:
/* Counter translation can fail (counter allocation); its return
 * value is checked, unlike the other action translators. */
1470 			ret = flow_verbs_translate_action_count(dev,
1479 			return rte_flow_error_set(error, ENOTSUP,
1480 						  RTE_FLOW_ERROR_TYPE_ACTION,
1482 						  "action not supported");
/* Merge this sub-flow's actions into the parent rte_flow. */
1485 	dev_flow->flow->actions |= action_flags;
1486 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1487 		switch (items->type) {
1488 		case RTE_FLOW_ITEM_TYPE_VOID:
1490 		case RTE_FLOW_ITEM_TYPE_ETH:
1491 			flow_verbs_translate_item_eth(items, &item_flags,
1494 		case RTE_FLOW_ITEM_TYPE_VLAN:
1495 			flow_verbs_translate_item_vlan(items, &item_flags,
1498 		case RTE_FLOW_ITEM_TYPE_IPV4:
1499 			flow_verbs_translate_item_ipv4(items, &item_flags,
1502 		case RTE_FLOW_ITEM_TYPE_IPV6:
1503 			flow_verbs_translate_item_ipv6(items, &item_flags,
1506 		case RTE_FLOW_ITEM_TYPE_UDP:
1507 			flow_verbs_translate_item_udp(items, &item_flags,
1510 		case RTE_FLOW_ITEM_TYPE_TCP:
1511 			flow_verbs_translate_item_tcp(items, &item_flags,
1514 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1515 			flow_verbs_translate_item_vxlan(items, &item_flags,
1518 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1519 			flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
1522 		case RTE_FLOW_ITEM_TYPE_GRE:
1523 			flow_verbs_translate_item_gre(items, &item_flags,
1526 		case RTE_FLOW_ITEM_TYPE_MPLS:
1527 			flow_verbs_translate_item_mpls(items, &item_flags,
1531 			return rte_flow_error_set(error, ENOTSUP,
1532 						  RTE_FLOW_ERROR_TYPE_ITEM,
1534 						  "item not supported");
/* Map the rte_flow priority onto the device's verbs priority range. */
1537 	dev_flow->verbs.attr->priority =
1538 		mlx5_flow_adjust_priority(dev, priority,
1539 					  dev_flow->verbs.attr->priority);
1544  * Remove the flow from the NIC but keep it in memory.
1547 * Pointer to the Ethernet device structure.
1548 * @param[in, out] flow
1549 * Pointer to flow structure.
1552 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
/*
 * Tears down the hardware state of every device sub-flow (verbs flow
 * handle and its hash Rx queue) plus the shared counter, while leaving
 * the mlx5_flow structures allocated so the flow can be re-applied.
 * NOTE(review): guards such as `if (!flow)`, `if (verbs->flow)` and
 * `if (verbs->hrxq)` appear elided from this extract.
 */
1554 	struct mlx5_flow_verbs *verbs;
1555 	struct mlx5_flow *dev_flow;
1559 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1560 		verbs = &dev_flow->verbs;
1562 			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
/* DROP flows use the shared drop hash queue; others hold a
 * reference on a regular hash Rx queue. */
1566 			if (flow->actions & MLX5_FLOW_ACTION_DROP)
1567 				mlx5_hrxq_drop_release(dev);
1569 				mlx5_hrxq_release(dev, verbs->hrxq);
1573 	if (flow->counter) {
1574 		flow_verbs_counter_release(flow->counter);
/* Clear the pointer so a repeated remove cannot double-release. */
1575 		flow->counter = NULL;
1580 * Remove the flow from the NIC and the memory.
1583 * Pointer to the Ethernet device structure.
1584 * @param[in, out] flow
1585 * Pointer to flow structure.
1588 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
/*
 * Full teardown: first detach from the NIC via flow_verbs_remove(),
 * then unlink and free every device sub-flow.
 * NOTE(review): the `rte_free(dev_flow);` inside the loop and the
 * closing brace appear elided from this extract.
 */
1590 	struct mlx5_flow *dev_flow;
1594 	flow_verbs_remove(dev, flow);
1595 	while (!LIST_EMPTY(&flow->dev_flows)) {
1596 		dev_flow = LIST_FIRST(&flow->dev_flows);
1597 		LIST_REMOVE(dev_flow, next);
1603 * Apply the flow to the NIC.
1606 * Pointer to the Ethernet device structure.
1607 * @param[in, out] flow
1608 * Pointer to flow structure.
1610 * Pointer to error structure.
1613 * 0 on success, a negative errno value otherwise and rte_errno is set.
1616 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1617 		 struct rte_flow_error *error)
/*
 * Installs every device sub-flow into hardware: obtain a hash Rx queue
 * (the shared drop queue for DROP flows, otherwise get-or-create one
 * keyed by the flow's RSS parameters) and create the verbs flow on its
 * QP.  On any failure, everything created so far is rolled back and
 * rte_errno is preserved across the cleanup.
 * NOTE(review): several guards (`if (!verbs->hrxq)`, `if (!hrxq)`,
 * `goto error` jumps, the error label) appear elided from this extract.
 */
1619 	struct mlx5_flow_verbs *verbs;
1620 	struct mlx5_flow *dev_flow;
1623 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1624 		verbs = &dev_flow->verbs;
1625 		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1626 			verbs->hrxq = mlx5_hrxq_drop_new(dev);
1630 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1631 						 "cannot get drop hash queue");
1635 			struct mlx5_hrxq *hrxq;
/* Try to reuse an existing hash Rx queue with matching RSS
 * key/fields/queues before creating a new one. */
1637 			hrxq = mlx5_hrxq_get(dev, flow->key,
1638 					     MLX5_RSS_HASH_KEY_LEN,
1641 					     flow->rss.queue_num);
1643 				hrxq = mlx5_hrxq_new(dev, flow->key,
1644 						     MLX5_RSS_HASH_KEY_LEN,
1647 						     flow->rss.queue_num,
1649 						      MLX5_FLOW_LAYER_TUNNEL));
1653 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1654 						 "cannot get hash queue");
1659 		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1662 			rte_flow_error_set(error, errno,
1663 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1665 					   "hardware refuses to create flow");
/* Error path: rte_errno must survive the release calls below,
 * which may themselves touch errno. */
1671 	err = rte_errno; /* Save rte_errno before cleanup. */
1672 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1673 		verbs = &dev_flow->verbs;
1675 			if (flow->actions & MLX5_FLOW_ACTION_DROP)
1676 				mlx5_hrxq_drop_release(dev);
1678 				mlx5_hrxq_release(dev, verbs->hrxq);
1682 	rte_errno = err; /* Restore rte_errno. */
1687 mlx5_flow_verbs_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
/*
 * Export the verbs flow engine as a driver-ops table so the generic
 * mlx5 flow layer can dispatch to this backend.
 * NOTE(review): the closing of the designated initializer and any
 * additional members (e.g. a query callback) appear elided from this
 * extract.
 */
1689 	*flow_ops = (struct mlx5_flow_driver_ops) {
1690 		.validate = flow_verbs_validate,
1691 		.prepare = flow_verbs_prepare,
1692 		.translate = flow_verbs_translate,
1693 		.apply = flow_verbs_apply,
1694 		.remove = flow_verbs_remove,
1695 		.destroy = flow_verbs_destroy,