/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
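
/*
 * Illustrative example: once a tunnel item such as VXLAN has been
 * translated, MLX5_FLOW_LAYER_TUNNEL is set in item_flags, so
 * VERBS_SPEC_INNER(item_flags) evaluates to IBV_FLOW_SPEC_INNER and every
 * subsequent Ethernet/IP/TCP/UDP spec is flagged as matching the inner
 * (encapsulated) headers rather than the outer ones.
 */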

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
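
/*
 * Worked example (illustrative): on a device exposing 16 Verbs priorities
 * (priv->config.flow_prio == RTE_DIM(priority_map_5)), a rule with base
 * priority 1 whose deepest matched layer assigns subpriority 2 lands on
 * Verbs priority priority_map_5[1][2] == 5. Deeper matches use smaller
 * subpriorities (see the MLX5_PRIORITY_MAP_L4/L3/L2 assignments in
 * flow_verbs_translate() below), i.e. numerically lower and therefore
 * stronger Verbs priorities within the same base-priority band.
 */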

/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container,
 *   optional.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	union mlx5_l3t_data data;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
	    data.dword)
		return data.dword;
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for the new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	if (shared) {
		data.dword = cnt_idx;
		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
			return 0;
		cnt->shared_info.id = id;
		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
	}
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
	if (IS_LEGACY_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
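
/*
 * Usage sketch (illustrative, hypothetical values): translate helpers build
 * a Verbs spec on the stack and append it to the workspace, e.g.:
 *
 *	struct ibv_flow_spec_action_drop drop = {
 *		.type = IBV_FLOW_SPEC_ACTION_DROP,
 *		.size = sizeof(drop),
 *	};
 *	flow_verbs_spec_add(&dev_flow->verbs, &drop, sizeof(drop));
 *
 * The specs buffer must already be large enough (see
 * flow_verbs_get_items_size() and flow_verbs_get_actions_size()); beyond
 * the assertion this helper performs no bounds checking.
 */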

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
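
/*
 * Example (illustrative): for the pattern eth / ipv4 / udp / vxlan where
 * the UDP item leaves dst_port unspecified, the lookahead above fills in
 * the IANA-assigned VXLAN port, i.e. udp.val.dst_port becomes
 * htons(MLX5_UDP_PORT_VXLAN) (4789) with an exact 0xffff mask, so the rule
 * still narrows the match to VXLAN traffic.
 */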

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
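
/*
 * Worked example (illustrative): the 24-bit VNI is copied into bytes 1..3
 * of the id union so that, read as a 32-bit word, it occupies the low
 * 24 bits in network byte order as the Verbs API expects. For
 * VNI = 0x123456 the union bytes become { 0x00, 0x12, 0x34, 0x56 } and
 * vxlan.val.tunnel_id equals RTE_BE32(0x00123456).
 */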

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (search != IBV_FLOW_SPEC_IPV4_EXT &&
	    search != IBV_FLOW_SPEC_IPV6)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}
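
/*
 * Usage sketch (illustrative, hypothetical values): an application-level
 * RSS action that this helper would consume:
 *
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.types = ETH_RSS_IP,
 *		.key_len = MLX5_RSS_HASH_KEY_LEN,
 *		.key = NULL,  (NULL selects rss_hash_default_key)
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 *
 * Only the queue list and the key are consumed here; level and types were
 * already folded into the RSS descriptor during flow expansion.
 */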

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and rte_errno
 *   is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function for validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, items,
							    item_flags, attr,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  items,
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the required bytes that are needed for the action part of the verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
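
/*
 * Worked example (illustrative): for the action list MARK / QUEUE / COUNT
 * this returns sizeof(struct ibv_flow_spec_action_tag) plus, when counter
 * support is compiled in, sizeof(struct ibv_flow_spec_counter_action).
 * QUEUE and RSS contribute nothing because they are realized through the
 * hash Rx queue, not through a Verbs spec.
 */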

/**
 * Calculate the required bytes that are needed for the item part of the verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocates the mlx5_flow with the required
 * size. The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* In case of corrupting the memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union: only touch it when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union: only touch it when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);
	return 0;
}
2088 .validate = flow_verbs_validate,
2089 .prepare = flow_verbs_prepare,
2090 .translate = flow_verbs_translate,
2091 .apply = flow_verbs_apply,
2092 .remove = flow_verbs_remove,
2093 .destroy = flow_verbs_destroy,
2094 .query = flow_verbs_query,
2095 .sync_domain = flow_verbs_sync_domain,