/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

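/*
 * Usage sketch (illustrative, not part of the driver): VERBS_SPEC_INNER()
 * lets every item translator below emit one spec type for both outer and
 * inner headers. For instance, once a tunnel layer has been parsed:
 *
 *	uint64_t item_flags = MLX5_FLOW_LAYER_VXLAN;	// hypothetical state
 *	struct ibv_flow_spec_eth eth = {
 *		// VXLAN is part of MLX5_FLOW_LAYER_TUNNEL, so this expands
 *		// to IBV_FLOW_SPEC_ETH | IBV_FLOW_SPEC_INNER.
 *		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
 *	};
 */
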
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

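/*
 * Reading the tables (illustrative): row = rte_flow base priority, column =
 * sub-priority derived from the deepest matched layer. E.g. with 16 Verbs
 * priorities, a rule at base priority 1 resolves to one of
 * priority_map_5[1][*] == { 3, 4, 5 }. Assuming the usual ordering
 * MLX5_PRIORITY_MAP_L4 < MLX5_PRIORITY_MAP_L3 < MLX5_PRIORITY_MAP_L2,
 * deeper layers pick a numerically lower column, so more specific rules win
 * within the same base priority.
 */
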
/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of priority available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   number of supported flow priority on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

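/*
 * Worked example (illustrative): on a port reporting 16 Verbs priorities
 * (priv->config.flow_prio == RTE_DIM(priority_map_5) == 5), a rule with
 * base priority 1 and an L3 sub-priority maps to
 * priority_map_5[1][MLX5_PRIORITY_MAP_L3], i.e. one of row 1's { 3, 4, 5 }.
 */
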
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container,
 *   optional, can be NULL.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

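/*
 * Index layout implied by the arithmetic above: a counter index encodes
 * pool * MLX5_COUNTERS_PER_POOL + offset + 1, with MLX5_CNT_SHARED_OFFSET
 * acting as a flag bit for shared counters. E.g., assuming a hypothetical
 * pool size of 512, the counter at offset 5 of pool 2 has index
 * 2 * 512 + 5 + 1 = 1030; its shared variant additionally carries the
 * MLX5_CNT_SHARED_OFFSET bit, which the mask above strips before decoding.
 */
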
/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 On success else a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

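/*
 * Note on the two backends above: HAVE_IBV_DEVICE_COUNTERS_SET_V42 is the
 * older counter-set API where one set is created per counter, while
 * HAVE_IBV_DEVICE_COUNTERS_SET_V45 is the newer ibv_counters API where the
 * packet and byte counters are attached individually (indices 0 and 1 here)
 * and later read back with a single query_counters() call.
 */
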
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	union mlx5_l3t_data data;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
	    data.dword)
		return data.dword;
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	if (shared) {
		data.dword = cnt_idx;
		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
			return 0;
		cnt->shared_info.id = id;
		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
	}
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
				((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

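/*
 * Buffer layout sketch: specifications are appended back to back right
 * after the ibv_flow_attr header, which is the layout ibv_create_flow()
 * expects:
 *
 *	| ibv_flow_attr | spec 0 | spec 1 | ... |
 *	                 ^ verbs->specs          ^ verbs->specs + verbs->size
 *
 * The update helpers below rely on this packing to walk existing specs via
 * ibv_spec_header::size.
 */
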
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

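/*
 * Example: for "eth / ipv4 / udp / vxlan" with no explicit UDP destination
 * port, the peek-ahead above defaults the match to the IANA-assigned VXLAN
 * port (MLX5_UDP_PORT_VXLAN, i.e. 4789); VXLAN-GPE (4790) and MPLS-over-UDP
 * (6635) are defaulted the same way, so the tunnel item alone still narrows
 * the match to the right traffic.
 */
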
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

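/*
 * VNI packing sketch: the 24-bit VXLAN VNI lands in bytes 1..3 of the
 * union, so reading id.vlan_id yields the 32-bit tunnel id the Verbs spec
 * expects; e.g. VNI bytes { 0x0a, 0x0b, 0x0c } become the uint32_t bytes
 * { 0x00, 0x0a, 0x0b, 0x0c }.
 */
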
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

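/*
 * Usage note: when the GRE item is translated without a dedicated GRE spec
 * (see below), the Verbs tunnel spec alone cannot express "IP protocol is
 * GRE", so this helper retrofits IPPROTO_GRE into the already-emitted
 * IPv4/IPv6 spec, e.g. turning "eth / ipv4 / gre" into a match on
 * ip.proto == 47 as well.
 */
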
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes
 * that the input is valid and that there is space to insert the requested
 * action into the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 On success else a negative errno value is returned and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the required bytes that are needed for the action part of the
 * verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the required bytes that are needed for the item part of the verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

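/*
 * Worked example (illustrative): for the pattern "eth / ipv4 / udp" this
 * returns sizeof(struct ibv_flow_spec_eth) + sizeof(struct
 * ibv_flow_spec_ipv4_ext) + sizeof(struct ibv_flow_spec_tcp_udp);
 * flow_verbs_prepare() adds the action sizes and checks the sum against
 * MLX5_VERBS_MAX_SPEC_ACT_SIZE before building the flow.
 */
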
/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* In case of corrupting the memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is union, don't touch it only the flag is set. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno;	/* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is union, don't touch it only the flag is set. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err;	/* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);
	return 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
};