/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include <mlx5_glue.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
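/*
 * Worked example (editor's illustration, not part of the original code):
 * each row is an rte_flow priority, each column a layer-based subpriority.
 * On a device with 16 Verbs priorities, a rule at rte_flow priority 1 and
 * subpriority 2 lands on Verbs priority priority_map_5[1][2] == 5.
 */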
/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_drop_action_create(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_drop_action_destroy(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}
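/*
 * Note (editor's illustration): the probe above creates throw-away drop
 * rules at the highest Verbs priority of each candidate (7, then 15). A
 * device accepting both reports 16 Verbs priorities and therefore
 * RTE_DIM(priority_map_5) == 5 rte_flow priorities; accepting only the
 * first reports RTE_DIM(priority_map_3) == 3.
 */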
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container, skipped when NULL.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
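/*
 * Index layout sketch (editor's illustration): counter indices are 1-based;
 * the MLX5_CNT_SHARED_OFFSET bit tags shared counters and is masked out
 * above, then idx / MLX5_COUNTERS_PER_POOL selects the pool and
 * idx % MLX5_COUNTERS_PER_POOL the slot inside it.
 */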
/**
 * Create a Verbs flow counter with the Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter_ext *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->id};

	counter->cs = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->cs = mlx5_glue->create_counters(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->cs, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->cs));
		counter->cs = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter_ext *cnt_ext = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	union mlx5_l3t_data data;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
	    data.dword) {
		cnt = flow_verbs_counter_get_by_idx(dev, data.dword, NULL);
		if (cnt->shared_info.ref_cnt + 1 == 0) {
			rte_errno = E2BIG;
			return 0;
		}
		cnt->shared_info.ref_cnt++;
		return data.dword;
	}
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for a new pool. */
		size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) *
		       MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		pool->type |= CNT_POOL_TYPE_EXT;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	if (shared) {
		data.dword = cnt_idx;
		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
			return 0;
		cnt->shared_info.ref_cnt = 1;
		cnt->shared_info.id = id;
		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
	}
	cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
	cnt->hits = 0;
	cnt->bytes = 0;
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt_ext);
	if (!ret) {
		TAILQ_REMOVE(&pool->counters[0], cnt, next);
		return cnt_idx;
	}
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}
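/*
 * Usage sketch (editor's illustration, hypothetical values): two flows
 * created with a shared COUNT action and the same id receive the same
 * index; the first call allocates the counter and sets ref_cnt to 1, the
 * second finds it through the l3t table and only bumps ref_cnt to 2.
 */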
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;
	struct mlx5_flow_counter_ext *cnt_ext;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
	if (IS_SHARED_CNT(counter)) {
		if (--cnt->shared_info.ref_cnt)
			return;
		mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
				     cnt->shared_info.id);
	}
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
	cnt_ext->cs = NULL;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs));
	cnt_ext->cs = NULL;
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}
/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
						(pool, cnt);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = cnt_ext->cs,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			       (cnt_ext->cs, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}
/**
 * Add a Verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
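/*
 * Layout sketch (editor's illustration): verbs->specs begins right after
 * the ibv_flow_attr header in the workspace, so after adding an ETH and an
 * IPV4 spec the buffer holds [attr][ibv_flow_spec_eth][ibv_flow_spec_ipv4_ext]
 * with attr.num_of_specs == 2 and verbs->size equal to the sum of both sizes.
 */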
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
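/*
 * Note (editor's illustration): Verbs has no standalone VLAN spec in this
 * path, so a VLAN item is folded into the already queued ETH spec; the walk
 * above steps through the packed specs header by header until it finds it.
 */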
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
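/*
 * Worked example (editor's illustration): for a VLAN item with
 * tci = RTE_BE16(0xe123), the PCP/DEI bits are masked off and
 * handle->vf_vlan.tag becomes 0x123, the plain VLAN ID.
 */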
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}
	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
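/*
 * Note (editor's illustration): when the UDP destination port is left
 * unmasked and the next non-VOID item is a tunnel, the IANA default is
 * forced: 4789 (MLX5_UDP_PORT_VXLAN), 4790 (MLX5_UDP_PORT_VXLAN_GPE) or
 * 6635 (MLX5_UDP_PORT_MPLS).
 */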
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
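/*
 * Worked example (editor's illustration): the 24-bit VNI is copied into
 * bytes 1..3 of the vni[4] union, so VNI 0x123456 is stored as the byte
 * sequence 00 12 34 56, i.e. the network-order value 0x00123456.
 */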
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}
/**
 * Update the protocol in the Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
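/*
 * Note (editor's illustration): matching eth / ipv4 / gre without an
 * explicit IPv4 protocol leaves val.proto at 0, so the helper above patches
 * the already queued IP spec to the given protocol (IPPROTO_GRE) with a
 * 0xff mask; an explicitly matched protocol is left untouched.
 */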
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}
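/*
 * Usage sketch (editor's illustration, hypothetical values): an RSS action
 * with queue = {0, 1, 2, 3} and key = NULL copies the four queue indices
 * into rss_desc and falls back to rss_hash_default_key for the
 * MLX5_RSS_HASH_KEY_LEN-byte hash key.
 */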
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and rte_errno
 *   is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_flow_counter_ext *cnt_ext;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
	counter.counter_set_handle = cnt_ext->cs->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
	counter.counters = cnt_ext->cs;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}
/**
 * Internal validation function for both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   Set when the flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
/**
 * Calculate the bytes needed for the action part of the Verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
/**
 * Calculate the bytes needed for the item part of the Verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
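/*
 * Worked example (editor's illustration): an eth / ipv4 / tcp pattern needs
 * sizeof(struct ibv_flow_spec_eth) + sizeof(struct ibv_flow_spec_ipv4_ext) +
 * sizeof(struct ibv_flow_spec_tcp_udp) bytes, which flow_verbs_prepare()
 * checks against MLX5_VERBS_MAX_SPEC_ACT_SIZE together with the actions.
 */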
/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;

	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overrunning the temporary device flow array. */
	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	/* No multi-thread support yet. */
	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}
/**
 * Fill the flow with Verbs specifications.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
					      priv->rss_desc)
					      [!!priv->flow_nested_idx];

	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}
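/*
 * Note (editor's illustration): a rule at rte_flow priority 1 whose deepest
 * translated item was L3 (subpriority MLX5_PRIORITY_MAP_L3) ends up at
 * Verbs priority priority_map_5[1][MLX5_PRIORITY_MAP_L3] on a 16-priority
 * device, as computed by mlx5_flow_adjust_priority() above.
 */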
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is in a union: touch it only when the fate flag says so. */
		if (handle->rix_hrxq) {
			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
				mlx5_drop_action_destroy(dev);
				handle->rix_hrxq = 0;
			} else if (handle->fate_action ==
				   MLX5_FLOW_FATE_QUEUE) {
				mlx5_hrxq_release(dev, handle->rix_hrxq);
				handle->rix_hrxq = 0;
			}
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;

	for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			hrxq = mlx5_drop_action_create(dev);
			if (!hrxq) {
				rte_flow_error_set
					(error, errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get drop hash queue");
				goto error;
			}
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc =
				&((struct mlx5_flow_rss_desc *)priv->rss_desc)
				[!!priv->flow_nested_idx];

			MLX5_ASSERT(rss_desc->queue_num);
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
						 MLX5_RSS_HASH_KEY_LEN,
						 dev_flow->hash_fields,
						 rss_desc->queue,
						 rss_desc->queue_num);
			if (!hrxq_idx)
				hrxq_idx = mlx5_hrxq_new
						(dev, rss_desc->key,
						 MLX5_RSS_HASH_KEY_LEN,
						 dev_flow->hash_fields,
						 rss_desc->queue,
						 rss_desc->queue_num,
						 !!(handle->layers &
						 MLX5_FLOW_LAYER_TUNNEL));
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is in a union: touch it only when the fate flag says so. */
		if (handle->rix_hrxq) {
			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
				mlx5_drop_action_destroy(dev);
				handle->rix_hrxq = 0;
			} else if (handle->fate_action ==
				   MLX5_FLOW_FATE_QUEUE) {
				mlx5_hrxq_release(dev, handle->rix_hrxq);
				handle->rix_hrxq = 0;
			}
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
};