/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */
#include <netinet/in.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include <mlx5_glue.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"
#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};
/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
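/*
 * In both maps the row is the rule base priority and the column is the
 * layer sub-priority; e.g. priority_map_5[1][2] resolves to Verbs
 * priority 5 when 16 Verbs priorities are available.
 */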
/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};
/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;
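	/*
	 * Probe the number of Verbs priorities: try to create a drop
	 * rule at the highest priority of each candidate range; the
	 * largest range the kernel accepts is what the device supports.
	 */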
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
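	/*
	 * For example, a device reporting 16 Verbs priorities uses
	 * priority_map_5, so base priority 1 with sub-priority 2 maps
	 * to priority_map_5[1][2] == 5.
	 */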
	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
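	/*
	 * Counter indices are 1-based and may carry the shared-counter
	 * marker above MLX5_CNT_SHARED_OFFSET; after stripping both,
	 * the quotient selects the pool and the remainder the counter
	 * slot inside it.
	 */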
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object; contains the counter ID. The handle of the
 *   created Verbs flow counter is returned in the cs field (if counters
 *   are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
		.counter_set_id = counter->shared_info.id,
	};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
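	/*
	 * A single ibv_counters object carries both statistics: the
	 * packet counter is attached at index 0 and the byte counter
	 * at index 1, matching the counters[0]/counters[1] layout the
	 * query path expects.
	 */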
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;
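	/*
	 * Allocation strategy: take a free counter from an existing
	 * pool if possible; otherwise grow the pool-pointer array by
	 * MLX5_CNT_CONTAINER_RESIZE entries when it is full and carve
	 * a fresh pool of MLX5_COUNTERS_PER_POOL counters.
	 */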
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for the new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}
/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = (struct ibv_counter_set *)cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}
/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Specification to copy into the verbs flow attribute.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
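	/*
	 * Specs are packed back to back after ibv_flow_attr; every
	 * spec starts with an ibv_spec_header whose size field gives
	 * the offset to the next one, which is how this loop advances.
	 */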
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
	}
	/* Remove unwanted bits from values. */
	udp.val.src_port &= udp.mask.src_port;
	udp.val.dst_port &= udp.mask.dst_port;
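	/*
	 * If the pattern leaves the UDP destination port unmasked, peek
	 * at the next non-VOID item and default the port to the
	 * well-known value of that tunnel so the rule still matches the
	 * intended encapsulation.
	 */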
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}
	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
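	/*
	 * The 24-bit VNI lands in bytes 1-3 of the 4-byte union, so
	 * reading it back as a 32-bit big-endian tunnel_id keeps byte 0
	 * zero and places the VNI in the low-order 24 bits.
	 */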
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
	}
	/* Remove unwanted bits from values. */
	vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
	}
	/* Remove unwanted bits from values. */
	vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}
/**
 * Update the protocol in the Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
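	/*
	 * GRE matching needs the enclosing IPv4/IPv6 spec to carry the
	 * GRE IP protocol: walk the already-translated specs and fill
	 * the protocol/next-header field in when the rule left it open.
	 */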
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and
 *   rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif
	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}
/**
 * Internal validation function. Validates both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   Set when this flow rule was created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
/**
 * Calculate the required bytes that are needed for the action part of the
 * verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
/**
 * Calculate the required bytes that are needed for the item part of the
 * verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = sizeof(struct ibv_flow_attr);
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* In case of corrupting the memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}
/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
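	/*
	 * The sub-priority left by the deepest item translated above is
	 * folded into the final Verbs priority below through
	 * mlx5_flow_adjust_priority(), using the maps at the top of
	 * this file.
	 */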
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/*
		 * The hrxq member is a union; release it only when the
		 * fate action marks it as a queue.
		 */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/*
		 * The hrxq member is a union; release it only when the
		 * fate action marks it as a queue.
		 */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);
	return 0;
}
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
};
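/*
 * These callbacks are reached through the generic mlx5 flow layer:
 * roughly, rte_flow_validate() maps to .validate, rte_flow_create() to
 * .prepare, .translate and .apply in that order, and rte_flow_destroy()
 * to .remove and .destroy.
 */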