/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include <mlx5_glue.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
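/*
 * Each row maps one flow rule priority to MLX5_PRIORITY_MAP_MAX Verbs
 * priorities, one per matching-layer sub-priority (L2/L3/L4), so deeper
 * matches can take precedence within the same rule priority. E.g. a rule
 * of priority 1 uses row { 3, 4, 5 } of priority_map_5.
 */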
/**
 * Discover the maximum number of Verbs flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
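/*
 * Worked example (illustrative): with 16 Verbs priorities
 * (priv->config.flow_prio == RTE_DIM(priority_map_5)), a rule of base
 * priority 1 whose deepest match is L3 resolves to
 * priority_map_5[1][MLX5_PRIORITY_MAP_L3], i.e. Verbs priority 4.
 */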
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container, can be NULL.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
	struct mlx5_flow_counter_pool *pool;

	idx--;
	pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
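/*
 * Counter indices are 1-based: MLX5_MAKE_CNT_IDX() adds one so that
 * index 0 can be reserved for "no counter", and the idx-- above undoes
 * it before decoding the pool number and the in-pool offset.
 */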
/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter_ext *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->id};

	counter->cs = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->cs = mlx5_glue->create_counters(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->cs, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->cs));
		counter->cs = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
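/*
 * With the v4.5 counters API one Verbs counters object carries two
 * hardware counters: packets attached at index 0 and bytes at index 1.
 * flow_verbs_counter_query() relies on this layout when it reads both
 * values into a two-entry array in a single call.
 */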
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter_ext *cnt_ext = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
	uint32_t pool_idx;
	uint32_t i;
	int ret;

	if (shared) {
		for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
			pool = cont->pools[pool_idx];
			for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
				cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
				if (cnt_ext->shared && cnt_ext->id == id) {
					cnt_ext->ref_cnt++;
					return MLX5_MAKE_CNT_IDX(pool_idx, i);
				}
			}
		}
	}
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cont->pools[pool_idx];
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cont->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cont->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cont->pools);
			}
			cont->pools = pools;
			cont->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for a new pool. */
		size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) *
		       MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		pool->type |= CNT_POOL_TYPE_EXT;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cont->pools[n_valid] = pool;
		pool_idx = n_valid;
		rte_atomic16_add(&cont->n_valid, 1);
		TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
	}
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
	cnt_ext->id = id;
	cnt_ext->shared = shared;
	cnt_ext->ref_cnt = 1;
	cnt->hits = 0;
	cnt->bytes = 0;
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt_ext);
	if (!ret) {
		TAILQ_REMOVE(&pool->counters[0], cnt, next);
		return MLX5_MAKE_CNT_IDX(pool_idx, i);
	}
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}
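/*
 * Design note: counters live in fixed-size pools and free ones are
 * taken from a per-pool free list; the returned handle encodes
 * (pool index, offset) via MLX5_MAKE_CNT_IDX(), keeping 0 reserved
 * for "no counter".
 */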
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handle.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;
	struct mlx5_flow_counter_ext *cnt_ext;

	cnt = flow_verbs_counter_get_by_idx(dev, counter,
					    &pool);
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
	if (--cnt_ext->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
		cnt_ext->cs = NULL;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs));
		cnt_ext->cs = NULL;
#endif
		TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	}
}
/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
						(pool, cnt);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = cnt_ext->cs,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			       (cnt_ext->cs, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}
/**
 * Add a Verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to Verbs structure.
 * @param[in] src
 *   Specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
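/*
 * All the translate helpers below funnel through this append: specs is
 * a flat byte buffer laid out exactly as ibv_create_flow() expects
 * (each spec starts with its type/size header), while attr.num_of_specs
 * and size track how much of it is in use.
 */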
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
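/*
 * The spec buffer is a packed list of variable-size entries, so the
 * walk above advances by each header's self-reported size until it
 * finds the Ethernet spec (inner or outer, selected via eth->type)
 * to patch with the VLAN fields.
 */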
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
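/*
 * Only the low 12 TCI bits (the VLAN ID) are recorded for the VF VLAN
 * workaround; the PCP and DEI bits are masked out with 0x0fff.
 */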
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}
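/*
 * vtc_flow packs version (4 bits), traffic class (8 bits) and flow
 * label (20 bits) into one big-endian word; the shifts above split it
 * into the separate traffic_class and flow_label fields Verbs expects.
 */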
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}
	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
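/*
 * When the rule does not pin the UDP destination port but the next
 * non-VOID item identifies a tunnel, the port defaults to the
 * well-known value of that encapsulation so the tunnel can still be
 * classified.
 */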
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
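/*
 * A VXLAN VNI is only 24 bits wide: it is copied into bytes 1..3 of
 * the union so the aliased 32-bit tunnel_id carries it with the
 * remaining byte left clear (the "vlan_id" member name is historical).
 */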
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}
/**
 * Update the IP protocol in the Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
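/*
 * Verbs GRE matching relies on the outer IP protocol being GRE: when
 * the user did not constrain it, the already-translated IPv4/IPv6 spec
 * is patched in place instead of emitting an extra spec.
 */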
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
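/*
 * FLAG is effectively MARK with the reserved default ID. Note that
 * mlx5_flow_mark_set() converts the user mark ID to the internal tag
 * encoding (avoiding the reserved zero value); the Rx path performs
 * the inverse conversion before exposing the mark in the mbuf.
 */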
/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and
 *   rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_flow_counter_ext *cnt_ext;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
	counter.counter_set_handle = cnt_ext->cs->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
	counter.counters = cnt_ext->cs;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}
/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
/**
 * Calculate the size in bytes needed for the action part of the Verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
/**
 * Calculate the size in bytes needed for the item part of the Verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
/**
 * Internal preparation function. Allocates the mlx5_flow with the required
 * size. The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;

	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overrunning the temporary device flow array. */
	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	/* No multi-thread support. */
	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only the sizes need to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}
/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
					      priv->rss_desc)
					      [!!priv->flow_nested_idx];

	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}
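/*
 * subpriority reflects the deepest layer matched (L2/L3/L4; tunnel
 * items use MLX5_TUNNEL_PRIO_GET()), so more specific rules land on
 * more preferred Verbs priorities via mlx5_flow_adjust_priority().
 */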
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union: do not touch it unless the flag is set. */
		if (handle->rix_hrxq) {
			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
				mlx5_hrxq_drop_release(dev);
				handle->rix_hrxq = 0;
			} else if (handle->fate_action ==
				   MLX5_FLOW_FATE_QUEUE) {
				mlx5_hrxq_release(dev, handle->rix_hrxq);
				handle->rix_hrxq = 0;
			}
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;

	for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			hrxq = mlx5_hrxq_drop_new(dev);
			if (!hrxq) {
				rte_flow_error_set
					(error, errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get drop hash queue");
				goto error;
			}
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc =
				&((struct mlx5_flow_rss_desc *)priv->rss_desc)
				[!!priv->flow_nested_idx];

			MLX5_ASSERT(rss_desc->queue_num);
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
						 MLX5_RSS_HASH_KEY_LEN,
						 dev_flow->hash_fields,
						 rss_desc->queue,
						 rss_desc->queue_num);
			if (!hrxq_idx)
				hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key,
						MLX5_RSS_HASH_KEY_LEN,
						dev_flow->hash_fields,
						rss_desc->queue,
						rss_desc->queue_num,
						!!(handle->layers &
						MLX5_FLOW_LAYER_TUNNEL));
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union: do not touch it unless the flag is set. */
		if (handle->rix_hrxq) {
			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
				mlx5_hrxq_drop_release(dev);
				handle->rix_hrxq = 0;
			} else if (handle->fate_action ==
				   MLX5_FLOW_FATE_QUEUE) {
				mlx5_hrxq_release(dev, handle->rix_hrxq);
				handle->rix_hrxq = 0;
			}
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
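/*
 * The error path mirrors flow_verbs_remove(): every handle populated
 * so far drops its hash Rx queue reference and VF VLAN workaround,
 * with rte_errno saved across the cleanup calls.
 */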
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
};