* Copyright 2016 Mellanox Technologies, Ltd
*/
-#include <netinet/in.h>
-#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
+#include <sys/queue.h>
#include <rte_common.h>
#include <rte_ether.h>
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"
const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
+ const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
* RSS types bit-field associated with this node
* (see ETH_RSS_* definitions).
*/
+ uint8_t optional;
+ /**< Optional expand field: 0 (default) to expand, 1 not to go deeper. */
};
/** Object returned by mlx5_flow_expand_rss(). */
} entry[];
};
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item);
+
+static bool
+mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
+{
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
return ret;
}
-#define MLX5_RSS_EXP_ELT_N 8
+#define MLX5_RSS_EXP_ELT_N 16
/**
* Expand RSS flows into several possible flows according to the RSS hash
* set, the following errors are defined:
*
* -E2BIG: graph-depth @p graph is too deep.
+ * -EINVAL: @p size is not large enough for the expanded pattern.
*/
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
memset(&missed_item, 0, sizeof(missed_item));
lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
- if (lsize <= size) {
- buf->entry[0].priority = 0;
- buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
- buf->entries = 0;
- addr = buf->entry[0].pattern;
- }
+ if (lsize > size)
+ return -EINVAL;
+ buf->entry[0].priority = 0;
+ buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
+ buf->entries = 0;
+ addr = buf->entry[0].pattern;
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
- last_item = item;
+ if (!mlx5_flow_is_rss_expandable_item(item)) {
+ user_pattern_size += sizeof(*item);
+ continue;
+ }
+ last_item = item;
for (i = 0; node->next && node->next[i]; ++i) {
next = &graph[node->next[i]];
if (next->type == item->type)
}
user_pattern_size += sizeof(*item); /* Handle END item. */
lsize += user_pattern_size;
+ if (lsize > size)
+ return -EINVAL;
/* Copy the user pattern in the first entry of the buffer. */
- if (lsize <= size) {
- rte_memcpy(addr, pattern, user_pattern_size);
- addr = (void *)(((uintptr_t)addr) + user_pattern_size);
- buf->entries = 1;
- }
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ buf->entries = 1;
/* Start expanding. */
memset(flow_items, 0, sizeof(flow_items));
user_pattern_size -= sizeof(*item);
elt = 2; /* missed item + item end. */
node = next;
lsize += elt * sizeof(*item) + user_pattern_size;
- if ((node->rss_types & types) && lsize <= size) {
+ if (lsize > size)
+ return -EINVAL;
+ if (node->rss_types & types) {
buf->entry[buf->entries].priority = 1;
buf->entry[buf->entries].pattern = addr;
buf->entries++;
while (node) {
flow_items[stack_pos].type = node->type;
if (node->rss_types & types) {
+ size_t n;
/*
* compute the number of items to copy from the
* expansion and copy it.
elt = stack_pos + 2;
flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
lsize += elt * sizeof(*item) + user_pattern_size;
- if (lsize <= size) {
- size_t n = elt * sizeof(*item);
-
- buf->entry[buf->entries].priority =
- stack_pos + 1 + missed;
- buf->entry[buf->entries].pattern = addr;
- buf->entries++;
- rte_memcpy(addr, buf->entry[0].pattern,
- user_pattern_size);
- addr = (void *)(((uintptr_t)addr) +
- user_pattern_size);
- rte_memcpy(addr, &missed_item,
- missed * sizeof(*item));
- addr = (void *)(((uintptr_t)addr) +
- missed * sizeof(*item));
- rte_memcpy(addr, flow_items, n);
- addr = (void *)(((uintptr_t)addr) + n);
- }
+ if (lsize > size)
+ return -EINVAL;
+ n = elt * sizeof(*item);
+ buf->entry[buf->entries].priority =
+ stack_pos + 1 + missed;
+ buf->entry[buf->entries].pattern = addr;
+ buf->entries++;
+ rte_memcpy(addr, buf->entry[0].pattern,
+ user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) +
+ user_pattern_size);
+ rte_memcpy(addr, &missed_item,
+ missed * sizeof(*item));
+ addr = (void *)(((uintptr_t)addr) +
+ missed * sizeof(*item));
+ rte_memcpy(addr, flow_items, n);
+ addr = (void *)(((uintptr_t)addr) + n);
}
/* Go deeper. */
- if (node->next) {
+ if (!node->optional && node->next) {
next_node = node->next;
if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
rte_errno = E2BIG;
}
node = *next_node ? &graph[*next_node] : NULL;
};
- /* no expanded flows but we have missed item, create one rule for it */
- if (buf->entries == 1 && missed != 0) {
- elt = 2;
- lsize += elt * sizeof(*item) + user_pattern_size;
- if (lsize <= size) {
- buf->entry[buf->entries].priority = 1;
- buf->entry[buf->entries].pattern = addr;
- buf->entries++;
- flow_items[0].type = missed_item.type;
- flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
- rte_memcpy(addr, buf->entry[0].pattern,
- user_pattern_size);
- addr = (void *)(((uintptr_t)addr) + user_pattern_size);
- rte_memcpy(addr, flow_items, elt * sizeof(*item));
- }
- }
return lsize;
}
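With the overflow checks above turned into hard failures, a caller can size the expansion buffer up front and treat a negative return as fatal instead of getting a silently truncated expansion. A minimal caller sketch, assuming the mlx5_flow_expand_rss() signature, the mlx5_support_expansion graph and MLX5_EXPANSION_ROOT defined in this file; the expand_or_fail() wrapper and the 4096-byte buffer are purely illustrative:

static int
expand_or_fail(const struct rte_flow_item *items, uint64_t rss_types)
{
	union {
		struct mlx5_flow_expand_rss buf;
		uint8_t buffer[4096]; /* illustrative size */
	} expand_buffer;
	int ret;

	ret = mlx5_flow_expand_rss(&expand_buffer.buf,
				   sizeof(expand_buffer.buffer),
				   items, rss_types,
				   mlx5_support_expansion,
				   MLX5_EXPANSION_ROOT);
	if (ret < 0)
		/* -EINVAL: buffer too small; -E2BIG: graph too deep. */
		return ret;
	/* On success, expand_buffer.buf.entries patterns are ready. */
	return 0;
}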
MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_NVGRE,
+ MLX5_EXPANSION_GRE_KEY,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
MLX5_EXPANSION_ETH_VLAN,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_FRAG_EXT,
};
/** Supported expansion of items. */
},
[MLX5_EXPANSION_OUTER_ETH] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
- MLX5_EXPANSION_OUTER_IPV6,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
},
(MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_NVGRE,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
},
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
- MLX5_EXPANSION_VXLAN_GPE),
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
(MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
MLX5_EXPANSION_IPV4,
- MLX5_EXPANSION_IPV6),
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_NVGRE),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
- MLX5_EXPANSION_VXLAN_GPE),
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
},
[MLX5_EXPANSION_GRE] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_GRE_KEY,
+ MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_GRE,
},
+ [MLX5_EXPANSION_GRE_KEY] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_MPLS),
+ .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
+ .optional = 1,
+ },
+ [MLX5_EXPANSION_NVGRE] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ },
[MLX5_EXPANSION_MPLS] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
- MLX5_EXPANSION_IPV6),
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_ETH),
.type = RTE_FLOW_ITEM_TYPE_MPLS,
},
[MLX5_EXPANSION_ETH] = {
},
[MLX5_EXPANSION_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
- MLX5_EXPANSION_IPV6_TCP),
+ MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_FRAG_EXT),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
ETH_RSS_NONFRAG_IPV6_OTHER,
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
+ [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ },
};
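The effect of the new .optional flag can be hard to see in the large graph above. Here is a minimal, purely illustrative graph (hypothetical SKETCH_* node ids) showing that the expansion walker treats an optional node as a leaf: the node itself may still be matched, but its .next children are never pushed, per the "!node->optional && node->next" check in mlx5_flow_expand_rss():

enum { SKETCH_ROOT, SKETCH_IPV4, SKETCH_OPT_KEY };
static const struct mlx5_flow_expand_node sketch_expansion[] = {
	[SKETCH_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(SKETCH_IPV4,
						  SKETCH_OPT_KEY),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[SKETCH_IPV4] = {
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4,
	},
	[SKETCH_OPT_KEY] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(SKETCH_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
		.optional = 1, /* leaf for expansion: .next is not visited */
	},
};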
-static struct rte_flow_shared_action *
-mlx5_shared_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+static struct rte_flow_action_handle *
+mlx5_action_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *error);
-static int mlx5_shared_action_destroy
+static int mlx5_action_handle_destroy
(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *shared_action,
+ struct rte_flow_action_handle *handle,
struct rte_flow_error *error);
-static int mlx5_shared_action_update
+static int mlx5_action_handle_update
(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *shared_action,
- const struct rte_flow_action *action,
+ struct rte_flow_action_handle *handle,
+ const void *update,
struct rte_flow_error *error);
-static int mlx5_shared_action_query
+static int mlx5_action_handle_query
(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action *action,
+ const struct rte_flow_action_handle *handle,
void *data,
struct rte_flow_error *error);
static int
.query = mlx5_flow_query,
.dev_dump = mlx5_flow_dev_dump,
.get_aged_flows = mlx5_flow_get_aged_flows,
- .shared_action_create = mlx5_shared_action_create,
- .shared_action_destroy = mlx5_shared_action_destroy,
- .shared_action_update = mlx5_shared_action_update,
- .shared_action_query = mlx5_shared_action_query,
+ .action_handle_create = mlx5_action_handle_create,
+ .action_handle_destroy = mlx5_action_handle_destroy,
+ .action_handle_update = mlx5_action_handle_update,
+ .action_handle_query = mlx5_action_handle_query,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
return REG_C_0;
}
break;
- case MLX5_MTR_SFX:
+ case MLX5_MTR_ID:
/*
- * If meter color and flow match share one register, flow match
+ * If meter color and meter id share one register, flow match
* should use the meter color register for match.
*/
if (priv->mtr_reg_share)
return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
REG_C_3;
case MLX5_MTR_COLOR:
- case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
+ case MLX5_ASO_FLOW_HIT:
+ case MLX5_ASO_CONNTRACK:
+ /* All features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
case MLX5_COPY_MARK:
return config->flow_mreg_c[2] != REG_NON;
}
+/**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ * Pointer to device flow rule attributes.
+ *
+ * @return
+ * The value of lowest priority of flow.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!attr->group && !attr->transfer)
+ return priv->config.flow_prio - 2;
+ return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate matcher priority of the flow.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ * The priority based on the items.
+ * @return
+ * The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint32_t subpriority)
+{
+ uint16_t priority = (uint16_t)attr->priority;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!attr->group && !attr->transfer) {
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = priv->config.flow_prio - 1;
+ return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+ }
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
+ return priority * 3 + subpriority;
+}
+
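A short sketch of how the two priority domains introduced above behave: root tables delegate to the OS-specific adjustment helper, non-root tables use the flat formula. The attribute values and the example_matcher_priority() wrapper are illustrative only:

static uint16_t
example_matcher_priority(struct rte_eth_dev *dev)
{
	const struct rte_flow_attr root = { .group = 0, .priority = 1 };
	const struct rte_flow_attr tbl = { .group = 1, .priority = 1 };
	uint32_t subpriority = 2; /* e.g. derived from L3/L4 items */

	/* Root table: adjusted by the OS-specific helper. */
	(void)mlx5_get_matcher_priority(dev, &root, subpriority);
	/* Non-root table: flat formula, 1 * 3 + 2 = 5. */
	return mlx5_get_matcher_priority(dev, &tbl, subpriority);
}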
/**
* Verify the @p item specifications (spec, last, mask) are compatible with the
* NIC capabilities.
* @param[in] dev_handle
* Pointer to device flow handle structure.
*/
-static void
+void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
data->dynf_meta = 0;
data->flow_meta_mask = 0;
data->flow_meta_offset = -1;
+ data->flow_meta_port_mask = 0;
} else {
data->dynf_meta = 1;
data->flow_meta_mask = rte_flow_dynf_metadata_mask;
data->flow_meta_offset = rte_flow_dynf_metadata_offs;
+ data->flow_meta_port_mask = (uint32_t)~0;
+ if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
+ data->flow_meta_port_mask >>= 16;
}
}
}
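The port mask computation above is small enough to state exactly; a self-contained sketch (MLX5_XMETA_MODE_META16 is the only name taken from the mlx5 headers):

static uint32_t
example_meta_port_mask(int dv_xmeta_en)
{
	uint32_t mask = (uint32_t)~0;	/* 0xffffffff */

	if (dv_xmeta_en == MLX5_XMETA_MODE_META16)
		mask >>= 16;		/* 0x0000ffff */
	return mask;
}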
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"RSS on eCPRI is not supported now");
}
+ if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
+ !(item_flags &
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
+ rss->level > 1)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
return 0;
}
return 0;
}
+/**
+ * Validate the ASO CT action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conntrack
+ * Pointer to the CT action profile.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_validate_action_ct(struct rte_eth_dev *dev,
+ const struct rte_flow_action_conntrack *conntrack,
+ struct rte_flow_error *error)
+{
+ RTE_SET_USED(dev);
+
+ if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid CT state");
+ if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid last TCP packet flag");
+ return 0;
+}
+
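A usage sketch for the new validator; the profile values are illustrative and the enums are the standard rte_flow conntrack definitions:

static int
example_validate_ct(struct rte_eth_dev *dev, struct rte_flow_error *err)
{
	const struct rte_flow_action_conntrack profile = {
		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
		.last_index = RTE_FLOW_CONNTRACK_FLAG_ACK,
	};

	/* Returns 0: both fields are within the accepted ranges. */
	return mlx5_validate_action_ct(dev, &profile, err);
}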
/**
* Verify the @p attributes will be correctly understood by the NIC and store
* them in the @p flow if everything is correct.
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL, "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"IPv4 cannot follow L2/VLAN layer "
"which ether type is not IPv4");
- if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
if (mask && spec)
next_proto = mask->hdr.next_proto_id &
spec->hdr.next_proto_id;
"which ether type is not IPv6");
if (mask && mask->hdr.proto == UINT8_MAX && spec)
next_proto = spec->hdr.proto;
- if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
/**
* Validate VXLAN item.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] item
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- * The next protocol in the previous item.
+ * @param[in] attr
+ * Flow rule attributes.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
uint64_t item_flags,
+ const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
-
+ const struct rte_flow_item_vxlan nic_mask = {
+ .vni = "\xff\xff\xff",
+ .rsvd1 = 0xff,
+ };
+ const struct rte_flow_item_vxlan *valid_mask;
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"multiple tunnel layers not"
" supported");
+ valid_mask = &rte_flow_item_vxlan_mask;
/*
* Verify only UDPv4 is present as defined in
* https://tools.ietf.org/html/rfc7348
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_mask;
+ /* FDB domain & NIC domain non-zero group */
+ if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+ valid_mask = &nic_mask;
+ /* Group zero in NIC domain */
+ if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1)
+ valid_mask = &nic_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_vxlan_mask,
+ (const uint8_t *)valid_mask,
sizeof(struct rte_flow_item_vxlan),
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
"MPLS not supported or"
" disabled in firmware"
" configuration.");
- /* MPLS over IP, UDP, GRE is allowed */
- if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
- MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ /* MPLS over UDP and GRE is allowed. */
+ if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
MLX5_FLOW_LAYER_GRE |
MLX5_FLOW_LAYER_GRE_KEY)))
return rte_flow_error_set(error, EINVAL,
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
-/**
- * Release resource related QUEUE/RSS action split.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Flow to release id's from.
- */
-static void
-flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
- struct rte_flow *flow)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t handle_idx;
- struct mlx5_flow_handle *dev_handle;
-
- SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
- handle_idx, dev_handle, next)
- if (dev_handle->split_flow_id)
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
- dev_handle->split_flow_id);
-}
-
static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_attr *attr __rte_unused,
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- flow_mreg_split_qrss_release(dev, flow);
MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
}
+/**
+ * Flow driver API to find the RSS meter sub-policy table. This abstracts
+ * calling driver-specific functions. The parent flow (rte_flow) should
+ * have a driver type (drv_type). It finds the RSS policy table that
+ * matches the given rss_desc.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] policy
+ * Pointer to meter policy table.
+ * @param[in] rss_desc
+ * Pointer to the array of RSS descriptors, one per meter color.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct mlx5_flow_meter_policy *policy,
+ struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
+
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
+}
+
+/**
+ * Flow driver color tag rule API. This abstracts calling driver-
+ * specific functions. The parent flow (rte_flow) should have a driver
+ * type (drv_type). It creates the color tag rules for a meter hierarchy.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] fm
+ * Pointer to flow meter structure.
+ * @param[in] src_port
+ * The src port this extra rule should use.
+ * @param[in] item
+ * The src port id match item.
+ * @param[out] error
+ * Pointer to error structure.
+ */
+static int
+flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct mlx5_flow_meter_info *fm,
+ int32_t src_port,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
+
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->meter_hierarchy_rule_create(dev, fm,
+ src_port, item, error);
+}
+
/**
* Get RSS action from the action list.
*
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] flow
+ * Parent flow structure pointer.
*
* @return
* Pointer to the RSS action if exist, else return NULL.
*/
static const struct rte_flow_action_rss*
-flow_get_rss_action(const struct rte_flow_action actions[])
+flow_get_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[])
{
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = NULL;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
rss = act->conf;
break;
}
+ case RTE_FLOW_ACTION_TYPE_METER:
+ {
+ uint32_t mtr_idx;
+ struct mlx5_flow_meter_info *fm;
+ struct mlx5_flow_meter_policy *policy;
+ const struct rte_flow_action_meter *mtr = actions->conf;
+
+ fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx);
+ if (fm && !fm->def_policy) {
+ policy = mlx5_flow_meter_policy_find(dev,
+ fm->policy_id, NULL);
+ MLX5_ASSERT(policy);
+ if (policy->is_hierarchy) {
+ policy =
+ mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ policy);
+ if (!policy)
+ return NULL;
+ }
+ if (policy->is_rss)
+ rss =
+ policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+ }
+ break;
+ }
default:
break;
}
return &pool->actions[offset - 1];
}
-/* maps shared action to translated non shared in some actions array */
-struct mlx5_translated_shared_action {
- struct rte_flow_shared_action *action; /**< Shared action */
- int index; /**< Index in related array of rte_flow_action */
+/* maps indirect action to translated direct in some actions array */
+struct mlx5_translated_action_handle {
+ struct rte_flow_action_handle *action; /**< Indirect action handle. */
+ int index; /**< Index in related array of rte_flow_action. */
};
/**
- * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to related
- * non shared action if translation possible.
- * This functionality used to run same execution path for both shared & non
- * shared actions on flow create. All necessary preparations for shared
- * action handling should be preformed on *shared* actions list returned
+ * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to related
+ * direct action if translation possible.
+ * This functionality used to run same execution path for both direct and
+ * indirect actions on flow create. All necessary preparations for indirect
+ * action handling should be performed on *handle* actions list returned
* from this call.
*
* @param[in] dev
* Pointer to Ethernet device.
* @param[in] actions
* List of actions to translate.
- * @param[out] shared
- * List to store translated shared actions.
- * @param[in, out] shared_n
- * Size of *shared* array. On return should be updated with number of shared
- * actions retrieved from the *actions* list.
+ * @param[out] handle
+ * List to store translated indirect action object handles.
+ * @param[in, out] indir_n
+ * Size of *handle* array. On return should be updated with number of
+ * indirect actions retrieved from the *actions* list.
* @param[out] translated_actions
- * List of actions where all shared actions were translated to non shared
+ * List of actions where all indirect actions were translated to direct
* if possible. NULL if no translation took place.
* @param[out] error
* Pointer to the error structure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_shared_actions_translate(struct rte_eth_dev *dev,
+flow_action_handles_translate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
- struct mlx5_translated_shared_action *shared,
- int *shared_n,
+ struct mlx5_translated_action_handle *handle,
+ int *indir_n,
struct rte_flow_action **translated_actions,
struct rte_flow_error *error)
{
size_t actions_size;
int n;
int copied_n = 0;
- struct mlx5_translated_shared_action *shared_end = NULL;
+ struct mlx5_translated_action_handle *handle_end = NULL;
for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
- if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
+ if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT)
continue;
- if (copied_n == *shared_n) {
+ if (copied_n == *indir_n) {
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
NULL, "too many shared actions");
}
- rte_memcpy(&shared[copied_n].action, &actions[n].conf,
+ rte_memcpy(&handle[copied_n].action, &actions[n].conf,
sizeof(actions[n].conf));
- shared[copied_n].index = n;
+ handle[copied_n].index = n;
copied_n++;
}
n++;
- *shared_n = copied_n;
+ *indir_n = copied_n;
if (!copied_n)
return 0;
actions_size = sizeof(struct rte_flow_action) * n;
return -ENOMEM;
}
memcpy(translated, actions, actions_size);
- for (shared_end = shared + copied_n; shared < shared_end; shared++) {
+ for (handle_end = handle + copied_n; handle < handle_end; handle++) {
struct mlx5_shared_action_rss *shared_rss;
- uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET)
- - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx &
+ ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- translated[shared->index].type =
+ translated[handle->index].type =
RTE_FLOW_ACTION_TYPE_RSS;
- translated[shared->index].conf =
+ translated[handle->index].conf =
&shared_rss->origin;
break;
- case MLX5_SHARED_ACTION_TYPE_AGE:
+ case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+ translated[handle->index].type =
+ (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
+ translated[handle->index].conf = (void *)(uintptr_t)idx;
+ break;
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
if (priv->sh->flow_hit_aso_en) {
- translated[shared->index].type =
+ translated[handle->index].type =
(enum rte_flow_action_type)
MLX5_RTE_FLOW_ACTION_TYPE_AGE;
- translated[shared->index].conf =
+ translated[handle->index].conf =
+ (void *)(uintptr_t)idx;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ if (priv->sh->ct_aso_en) {
+ translated[handle->index].type =
+ RTE_FLOW_ACTION_TYPE_CONNTRACK;
+ translated[handle->index].conf =
(void *)(uintptr_t)idx;
break;
}
mlx5_free(translated);
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "invalid shared action type");
+ NULL, "invalid indirect action type");
}
}
*translated_actions = translated;
*/
static uint32_t
flow_get_shared_rss_action(struct rte_eth_dev *dev,
- struct mlx5_translated_shared_action *shared,
+ struct mlx5_translated_action_handle *handle,
int shared_n)
{
- struct mlx5_translated_shared_action *shared_end;
+ struct mlx5_translated_action_handle *handle_end;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss;
- for (shared_end = shared + shared_n; shared < shared_end; shared++) {
- uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ for (handle_end = handle + shared_n; handle < handle_end; handle++) {
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
uint32_t idx = act_idx &
- ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
return actions_n + 1;
}
+/**
+ * Check if the action will change the packet.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] type
+ * Action type.
+ *
+ * @return
+ * true if the action will change the packet, false otherwise.
+ */
+static bool
+flow_check_modify_action_type(struct rte_eth_dev *dev,
+ enum rte_flow_action_type type)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ return true;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ return true;
+ else
+ return false;
+ default:
+ return false;
+ }
+}
+
/**
* Check meter action from the action list.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in] actions
* Pointer to the list of actions.
- * @param[out] mtr
+ * @param[out] has_mtr
* Pointer to the meter exist flag.
+ * @param[out] has_modify
+ * Pointer to the flag showing whether there is a packet-modifying action.
+ * @param[out] meter_id
+ * Pointer to the meter id.
*
* @return
* Total number of actions.
*/
static int
-flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
+flow_check_meter_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ bool *has_mtr, bool *has_modify, uint32_t *meter_id)
{
+ const struct rte_flow_action_meter *mtr = NULL;
int actions_n = 0;
- MLX5_ASSERT(mtr);
- *mtr = 0;
+ MLX5_ASSERT(has_mtr);
+ *has_mtr = false;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_METER:
- *mtr = 1;
+ mtr = actions->conf;
+ *meter_id = mtr->mtr_id;
+ *has_mtr = true;
break;
default:
break;
}
+ if (!*has_mtr)
+ *has_modify |= flow_check_modify_action_type(dev,
+ actions->type);
actions_n++;
}
/* Count RTE_FLOW_ACTION_TYPE_END. */
/* Declare flow create/destroy prototype in advance. */
static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
bool external, struct rte_flow_error *error);
static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx);
int
-flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx __rte_unused)
+flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_mreg_copy_resource *mcp_res =
- container_of(entry, typeof(*mcp_res), hlist_ent);
+ container_of(entry, typeof(*mcp_res), hlist_ent);
- return mcp_res->mark_id != key;
+ return mcp_res->mark_id != *(uint32_t *)(ctx->data);
}
-struct mlx5_hlist_entry *
-flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct rte_flow_error *error = ctx->error;
uint32_t idx = 0;
int ret;
- uint32_t mark_id = key;
+ uint32_t mark_id = *(uint32_t *)(ctx->data);
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
.ingress = 1,
};
} else {
/* Default rule, wildcard match. */
- attr.priority = MLX5_FLOW_PRIO_RSVD;
+ attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_END,
};
 * be applied, removed, deleted in arbitrary order
* by list traversing.
*/
- mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
- actions, false, error);
+ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+ &attr, items, actions, false, error);
if (!mcp_res->rix_flow) {
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
return NULL;
return &mcp_res->hlist_ent;
}
+struct mlx5_list_entry *
+flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx __rte_unused)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ uint32_t idx = 0;
+
+ mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
+ if (!mcp_res) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ memcpy(mcp_res, oentry, sizeof(*mcp_res));
+ mcp_res->idx = idx;
+ return &mcp_res->hlist_ent;
+}
+
+void
+flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+}
+
/**
* Add a flow of copying flow metadata registers in RX_CP_TBL.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
+ .data = &mark_id,
};
/* Check if already registered. */
}
void
-flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
+flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
struct mlx5_flow_mreg_copy_resource *mcp_res =
- container_of(entry, typeof(*mcp_res), hlist_ent);
- struct rte_eth_dev *dev = list->ctx;
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID, NULL);
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
if (!entry)
return;
mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check whether extensive metadata feature is engaged. */
if (!priv->config.dv_flow_en ||
* Add default mreg copy flow may be called multiple time, but
* only be called once in stop. Avoid register it twice.
*/
- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
return 0;
- mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
+ mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
if (!mcp_res)
return -rte_errno;
return 0;
rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
actions_rx++;
set_tag = (void *)actions_rx;
- set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
+ *set_tag = (struct mlx5_rte_flow_action_set_tag) {
+ .id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL),
+ .data = flow_id,
+ };
MLX5_ASSERT(set_tag->id > REG_NON);
- set_tag->data = flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
dev_flow->handle->mark = 1;
if (sub_flow)
*sub_flow = dev_flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ dev_flow->dv.table_id = flow_split_info->table_id;
+#endif
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}
/**
- * Split the meter flow.
- *
- * As meter flow will split to three sub flow, other than meter
- * action, the other actions make sense to only meter accepts
- * the packet. If it need to be dropped, no other additional
- * actions should be take.
- *
- * One kind of special action which decapsulates the L3 tunnel
- * header will be in the prefix sub flow, as not to take the
- * L3 tunnel header into account.
+ * Get the sub policy of a meter.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param wks
+ * Pointer to thread flow work space.
+ * @param[in] attr
+ * Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
- * @param[out] sfx_items
- * Suffix flow match items (list terminated by the END pattern item).
- * @param[in] actions
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Pointer to the meter sub policy, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+get_meter_sub_policy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct mlx5_flow_workspace *wks,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_meter_policy *policy;
+ struct mlx5_flow_meter_policy *final_policy;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+
+ policy = wks->policy;
+ final_policy = policy->is_hierarchy ? wks->final_policy : policy;
+ if (final_policy->is_rss || final_policy->is_queue) {
+ struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
+ struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
+ uint32_t i;
+
+ /*
+ * This is a temporary dev_flow; there is no need to register
+ * any matcher for it in translate.
+ */
+ wks->skip_matcher_reg = 1;
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ struct mlx5_flow dev_flow = {0};
+ struct mlx5_flow_handle dev_handle = { {0} };
+ uint8_t fate = final_policy->act_cnt[i].fate_action;
+
+ if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
+ const void *rss_act =
+ final_policy->act_cnt[i].rss->conf;
+ struct rte_flow_action rss_actions[2] = {
+ [0] = {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = rss_act,
+ },
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ .conf = NULL,
+ }
+ };
+
+ dev_flow.handle = &dev_handle;
+ dev_flow.ingress = attr->ingress;
+ dev_flow.flow = flow;
+ dev_flow.external = 0;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ dev_flow.dv.transfer = attr->transfer;
+#endif
+ /**
+ * Translate RSS action to get rss hash fields.
+ */
+ if (flow_drv_translate(dev, &dev_flow, attr,
+ items, rss_actions, error))
+ goto exit;
+ rss_desc_v[i] = wks->rss_desc;
+ rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc_v[i].hash_fields =
+ dev_flow.hash_fields;
+ rss_desc_v[i].queue_num =
+ rss_desc_v[i].hash_fields ?
+ rss_desc_v[i].queue_num : 1;
+ rss_desc_v[i].tunnel =
+ !!(dev_flow.handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL);
+ rss_desc[i] = &rss_desc_v[i];
+ } else if (fate == MLX5_FLOW_FATE_QUEUE) {
+ /* This is queue action. */
+ rss_desc_v[i] = wks->rss_desc;
+ rss_desc_v[i].key_len = 0;
+ rss_desc_v[i].hash_fields = 0;
+ rss_desc_v[i].queue =
+ &final_policy->act_cnt[i].queue;
+ rss_desc_v[i].queue_num = 1;
+ rss_desc[i] = &rss_desc_v[i];
+ } else {
+ rss_desc[i] = NULL;
+ }
+ }
+ sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
+ flow, policy, rss_desc);
+ } else {
+ enum mlx5_meter_domain mtr_domain =
+ attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
+ sub_policy = policy->sub_policys[mtr_domain][0];
+ }
+ if (!sub_policy)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to get meter sub-policy.");
+exit:
+ return sub_policy;
+}
+
+/**
+ * Split the meter flow.
+ *
+ * As a meter flow will be split into three sub flows, actions other
+ * than the meter action only make sense if the meter accepts the
+ * packet. If it needs to be dropped, no additional actions should
+ * be taken.
+ *
+ * One kind of special action which decapsulates the L3 tunnel
+ * header will be placed in the prefix sub flow, so as not to take
+ * the L3 tunnel header into account.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param wks
+ * Pointer to thread flow work space.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[out] sfx_items
+ * Suffix flow match items (list terminated by the END pattern item).
+ * @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] actions_sfx
* Suffix flow actions.
* @param[out] actions_pre
* Prefix flow actions.
- * @param[out] pattern_sfx
- * The pattern items for the suffix flow.
- * @param[out] tag_sfx
- * Pointer to suffix flow tag.
+ * @param[out] mtr_flow_id
+ * Pointer to meter flow id.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_meter_split_prep(struct rte_eth_dev *dev,
- const struct rte_flow_item items[],
- struct rte_flow_item sfx_items[],
- const struct rte_flow_action actions[],
- struct rte_flow_action actions_sfx[],
- struct rte_flow_action actions_pre[])
+ struct rte_flow *flow,
+ struct mlx5_flow_workspace *wks,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_item sfx_items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_action actions_sfx[],
+ struct rte_flow_action actions_pre[],
+ uint32_t *mtr_flow_id,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_info *fm = wks->fm;
struct rte_flow_action *tag_action = NULL;
struct rte_flow_item *tag_item;
struct mlx5_rte_flow_action_set_tag *set_tag;
- struct rte_flow_error error;
const struct rte_flow_action_raw_encap *raw_encap;
const struct rte_flow_action_raw_decap *raw_decap;
- struct mlx5_rte_flow_item_tag *tag_spec;
- struct mlx5_rte_flow_item_tag *tag_mask;
+ struct mlx5_rte_flow_item_tag *tag_item_spec;
+ struct mlx5_rte_flow_item_tag *tag_item_mask;
uint32_t tag_id = 0;
- bool copy_vlan = false;
+ struct rte_flow_item *vlan_item_dst = NULL;
+ const struct rte_flow_item *vlan_item_src = NULL;
+ struct rte_flow_action *hw_mtr_action;
+ struct rte_flow_action *action_pre_head = NULL;
+ int32_t flow_src_port = priv->representor_id;
+ bool mtr_first;
+ uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
+ uint8_t mtr_reg_bits = priv->mtr_reg_share ?
+ MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
+ uint32_t flow_id = 0;
+ uint32_t flow_id_reversed = 0;
+ uint8_t flow_id_bits = 0;
+ int shift;
+ /* Prepare the suffix subflow items. */
+ tag_item = sfx_items++;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ struct mlx5_priv *port_priv;
+ const struct rte_flow_item_port_id *pid_v;
+ int item_type = items->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ pid_v = items->spec;
+ MLX5_ASSERT(pid_v);
+ port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
+ if (!port_priv)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pid_v,
+ "Failed to get port info.");
+ flow_src_port = port_priv->representor_id;
+ if (!fm->def_policy && wks->policy->is_hierarchy &&
+ flow_src_port != priv->representor_id) {
+ if (flow_drv_mtr_hierarchy_rule_create(dev,
+ flow, fm,
+ flow_src_port,
+ items,
+ error))
+ return -rte_errno;
+ }
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ /* Determine whether to copy the VLAN item below. */
+ vlan_item_src = items;
+ vlan_item_dst = sfx_items++;
+ vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID;
+ break;
+ default:
+ break;
+ }
+ }
+ sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+ sfx_items++;
+ mtr_first = priv->sh->meter_aso_en &&
+ (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
+ /* For ASO meter, meter must be before tag in TX direction. */
+ if (mtr_first) {
+ action_pre_head = actions_pre++;
+ /* Leave space for tag action. */
+ tag_action = actions_pre++;
+ }
/* Prepare the actions for prefix and suffix flow. */
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- struct rte_flow_action **action_cur = NULL;
+ struct rte_flow_action *action_cur = NULL;
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_METER:
- /* Add the extra tag action first. */
- tag_action = actions_pre;
- tag_action->type = (enum rte_flow_action_type)
- MLX5_RTE_FLOW_ACTION_TYPE_TAG;
- actions_pre++;
- action_cur = &actions_pre;
+ if (mtr_first) {
+ action_cur = action_pre_head;
+ } else {
+ /* Leave space for tag action. */
+ tag_action = actions_pre++;
+ action_cur = actions_pre++;
+ }
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- action_cur = &actions_pre;
+ action_cur = actions_pre++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
- action_cur = &actions_pre;
+ action_cur = actions_pre++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
raw_decap = actions->conf;
if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
- action_cur = &actions_pre;
+ action_cur = actions_pre++;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
- copy_vlan = true;
+ if (vlan_item_dst && vlan_item_src) {
+ memcpy(vlan_item_dst, vlan_item_src,
+ sizeof(*vlan_item_dst));
+ /*
+ * Convert to internal match item, it is used
+ * for vlan push and set vid.
+ */
+ vlan_item_dst->type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+ }
break;
default:
break;
}
if (!action_cur)
- action_cur = &actions_sfx;
- memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
- (*action_cur)++;
+ action_cur = (fm->def_policy) ?
+ actions_sfx++ : actions_pre++;
+ memcpy(action_cur, actions, sizeof(struct rte_flow_action));
}
/* Add end action to the actions. */
actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
+ if (priv->sh->meter_aso_en) {
+ /**
+ * For ASO meter, need to add an extra jump action explicitly,
+ * to jump from meter to policer table.
+ */
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+
+ if (!fm->def_policy) {
+ sub_policy = get_meter_sub_policy(dev, flow, wks,
+ attr, items, error);
+ if (!sub_policy)
+ return -rte_errno;
+ } else {
+ enum mlx5_meter_domain mtr_domain =
+ attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
+
+ sub_policy =
+ &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
+ }
+ tbl_data = container_of(sub_policy->tbl_rsc,
+ struct mlx5_flow_tbl_data_entry, tbl);
+ hw_mtr_action = actions_pre++;
+ hw_mtr_action->type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_JUMP;
+ hw_mtr_action->conf = tbl_data->jump.action;
+ }
actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
actions_pre++;
- /* Set the tag. */
- set_tag = (void *)actions_pre;
- set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
- mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
- &tag_id);
- if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
- DRV_LOG(ERR, "Port %u meter flow id exceed max limit.",
- dev->data->port_id);
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
- return 0;
- } else if (!tag_id) {
- return 0;
+ if (!tag_action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No tag action space.");
+ if (!mtr_flow_id) {
+ tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
+ goto exit;
}
- set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
- assert(tag_action);
- tag_action->conf = set_tag;
- /* Prepare the suffix subflow items. */
- tag_item = sfx_items++;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int item_type = items->type;
-
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_PORT_ID:
- memcpy(sfx_items, items, sizeof(*sfx_items));
- sfx_items++;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- if (copy_vlan) {
- memcpy(sfx_items, items, sizeof(*sfx_items));
- /*
- * Convert to internal match item, it is used
- * for vlan push and set vid.
- */
- sfx_items->type = (enum rte_flow_item_type)
- MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
- sfx_items++;
- }
- break;
- default:
- break;
+ /* Only default-policy Meter creates mtr flow id. */
+ if (fm->def_policy) {
+ mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
+ if (!tag_id)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate meter flow id.");
+ flow_id = tag_id - 1;
+ flow_id_bits = (!flow_id) ? 1 :
+ (MLX5_REG_BITS - __builtin_clz(flow_id));
+ if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
+ mtr_reg_bits) {
+ mlx5_ipool_free(fm->flow_ipool, tag_id);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Meter flow id exceeds max limit.");
}
+ if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
+ priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
}
- sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
- sfx_items++;
- tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
- tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
- tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
- tag_mask = tag_spec + 1;
- tag_mask->data = 0xffffff00;
+ /* Build tag actions and items for meter_id/meter flow_id. */
+ set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
+ tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
+ tag_item_mask = tag_item_spec + 1;
+ /* Both flow_id and meter_id share the same register. */
+ *set_tag = (struct mlx5_rte_flow_action_set_tag) {
+ .id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
+ 0, error),
+ .offset = mtr_id_offset,
+ .length = mtr_reg_bits,
+ .data = flow->meter,
+ };
+ /*
+ * The color Reg bits used by flow_id are growing from
+ * msb to lsb, so must do bit reverse for flow_id val in RegC.
+ */
+ for (shift = 0; shift < flow_id_bits; shift++)
+ flow_id_reversed = (flow_id_reversed << 1) |
+ ((flow_id >> shift) & 0x1);
+ set_tag->data |=
+ flow_id_reversed << (mtr_reg_bits - flow_id_bits);
+ tag_item_spec->id = set_tag->id;
+ tag_item_spec->data = set_tag->data << mtr_id_offset;
+ tag_item_mask->data = UINT32_MAX << mtr_id_offset;
+ tag_action->type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ tag_action->conf = set_tag;
tag_item->type = (enum rte_flow_item_type)
- MLX5_RTE_FLOW_ITEM_TYPE_TAG;
- tag_item->spec = tag_spec;
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ tag_item->spec = tag_item_spec;
tag_item->last = NULL;
- tag_item->mask = tag_mask;
- return tag_id;
+ tag_item->mask = tag_item_mask;
+exit:
+ if (mtr_flow_id)
+ *mtr_flow_id = tag_id;
+ return 0;
}
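The bit reversal above is the subtle part of sharing one register between the meter id (growing from the LSB of the field) and the flow id (growing from the MSB). A self-contained sketch with a worked value: flow_id 0x6 (110b, 3 bits) reverses to 011b and, with mtr_reg_bits = 24, lands in bits 23..21:

static uint32_t
example_pack_flow_id(uint32_t flow_id, uint8_t flow_id_bits,
		     uint8_t mtr_reg_bits)
{
	uint32_t reversed = 0;
	int shift;

	/* Same loop as flow_meter_split_prep(): reverse the low bits. */
	for (shift = 0; shift < flow_id_bits; shift++)
		reversed = (reversed << 1) | ((flow_id >> shift) & 0x1);
	/* example_pack_flow_id(0x6, 3, 24) == 0x3 << 21 == 0x600000. */
	return reversed << (mtr_reg_bits - flow_id_bits);
}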
/**
case RTE_FLOW_ACTION_TYPE_MARK:
case RTE_FLOW_ACTION_TYPE_SET_META:
case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ case RTE_FLOW_ACTION_TYPE_METER:
if (fdb_mirror)
*modify_after_mirror = 1;
break;
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
if (ret < 0)
return ret;
- set_tag->id = ret;
mlx5_ipool_malloc(priv->sh->ipool
[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
- set_tag->data = tag_id;
+ *set_tag = (struct mlx5_rte_flow_action_set_tag) {
+ .id = ret,
+ .data = tag_id,
+ };
/* Prepare the suffix subflow items. */
tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
tag_spec->data = tag_id;
if (qrss) {
/* Check if it is in meter suffix table. */
mtr_sfx = attr->group == (attr->transfer ?
- (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
- MLX5_FLOW_TABLE_LEVEL_SUFFIX);
+ (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
+ MLX5_FLOW_TABLE_LEVEL_METER);
/*
* Q/RSS action on NIC Rx should be split in order to pass by
* the mreg copy table (RX_CP_TBL) and then it jumps to the
return ret;
}
+/**
+ * Create meter internal drop flow with the original pattern.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
+ * @param[in] fm
+ * Pointer to flow meter structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static uint32_t
+flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ struct mlx5_flow_split_info *flow_split_info,
+ struct mlx5_flow_meter_info *fm,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow *dev_flow = NULL;
+ struct rte_flow_attr drop_attr = *attr;
+ struct rte_flow_action drop_actions[3];
+ struct mlx5_flow_split_info drop_split_info = *flow_split_info;
+
+ MLX5_ASSERT(fm->drop_cnt);
+ drop_actions[0].type =
+ (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
+ drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
+ drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
+ drop_actions[1].conf = NULL;
+ drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
+ drop_actions[2].conf = NULL;
+ drop_split_info.external = false;
+ drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
+ drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
+ drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
+ return flow_create_split_inner(dev, flow, &dev_flow,
+ &drop_attr, items, drop_actions,
+ &drop_split_info, error);
+}
+
/**
* The splitting for meter feature.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
struct rte_flow_action *sfx_actions = NULL;
struct rte_flow_action *pre_actions = NULL;
struct rte_flow_item *sfx_items = NULL;
struct mlx5_flow *dev_flow = NULL;
struct rte_flow_attr sfx_attr = *attr;
- uint32_t mtr = 0;
- uint32_t mtr_tag_id = 0;
+ struct mlx5_flow_meter_info *fm = NULL;
+ uint8_t skip_scale_restore;
+ bool has_mtr = false;
+ bool has_modify = false;
+ bool set_mtr_reg = true;
+ bool is_mtr_hierarchy = false;
+ uint32_t meter_id = 0;
+ uint32_t mtr_idx = 0;
+ uint32_t mtr_flow_id = 0;
size_t act_size;
size_t item_size;
int actions_n = 0;
- int ret;
+ int ret = 0;
if (priv->mtr_en)
- actions_n = flow_check_meter_action(actions, &mtr);
- if (mtr) {
- /* The five prefix actions: meter, decap, encap, tag, end. */
- act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
+ actions_n = flow_check_meter_action(dev, actions, &has_mtr,
+ &has_modify, &meter_id);
+ if (has_mtr) {
+ if (flow->meter) {
+ fm = flow_dv_meter_find_by_idx(priv, flow->meter);
+ if (!fm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter not found.");
+ } else {
+ fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
+ if (!fm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter not found.");
+ ret = mlx5_flow_meter_attach(priv, fm,
+ &sfx_attr, error);
+ if (ret)
+ return -rte_errno;
+ flow->meter = mtr_idx;
+ }
+ MLX5_ASSERT(wks);
+ wks->fm = fm;
+ if (!fm->def_policy) {
+ wks->policy = mlx5_flow_meter_policy_find(dev,
+ fm->policy_id,
+ NULL);
+ MLX5_ASSERT(wks->policy);
+ if (wks->policy->is_hierarchy) {
+ wks->final_policy =
+ mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ wks->policy);
+ if (!wks->final_policy)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to find terminal policy of hierarchy.");
+ is_mtr_hierarchy = true;
+ }
+ }
+ /*
+ * For a non-default-policy meter there is no need to use
+ * regC to save the meter ID when both conditions hold:
+ * 1. It is not a meter hierarchy.
+ * 2. There is no action in the flow that modifies the
+ * packet (modify/encap/decap etc.), or no drop count
+ * is needed for this meter.
+ */
+ if (!fm->def_policy && !is_mtr_hierarchy &&
+ (!has_modify || !fm->drop_cnt))
+ set_mtr_reg = false;
+ /* Prefix actions: meter, decap, encap, tag, jump, end. */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
sizeof(struct mlx5_rte_flow_action_set_tag);
- /* tag, vlan, port id, end. */
+ /* Suffix items: tag, vlan, port id, end. */
#define METER_SUFFIX_ITEM 4
item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
sizeof(struct mlx5_rte_flow_item_tag) * 2;
"meter flow");
sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
act_size);
- pre_actions = sfx_actions + actions_n;
- mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
- actions, sfx_actions,
- pre_actions);
- if (!mtr_tag_id) {
+ /* There is no suffix subflow for a meter with a non-default policy. */
+ if (!fm->def_policy)
+ pre_actions = sfx_actions + 1;
+ else
+ pre_actions = sfx_actions + actions_n;
+ ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
+ items, sfx_items, actions,
+ sfx_actions, pre_actions,
+ (set_mtr_reg ? &mtr_flow_id : NULL),
+ error);
+ if (ret) {
ret = -rte_errno;
goto exit;
}
/* Add the prefix subflow. */
flow_split_info->prefix_mark = 0;
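+ /* Do not scale the jump target group while creating the prefix subflow; the flags are restored right after the split. */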
+ skip_scale_restore = flow_split_info->skip_scale;
+ flow_split_info->skip_scale |=
+ 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
ret = flow_create_split_inner(dev, flow, &dev_flow,
attr, items, pre_actions,
flow_split_info, error);
+ flow_split_info->skip_scale = skip_scale_restore;
if (ret) {
+ if (mtr_flow_id)
+ mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
ret = -rte_errno;
goto exit;
}
- dev_flow->handle->split_flow_id = mtr_tag_id;
+ if (mtr_flow_id) {
+ dev_flow->handle->split_flow_id = mtr_flow_id;
+ dev_flow->handle->is_meter_flow_id = 1;
+ }
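+ /* No suffix subflow for a non-default policy meter; only add the optional drop subflow with the original pattern before exiting. */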
+ if (!fm->def_policy) {
+ if (!set_mtr_reg && fm->drop_cnt)
+ ret =
+ flow_meter_create_drop_flow_with_org_pattern(dev, flow,
+ &sfx_attr, items,
+ flow_split_info,
+ fm, error);
+ goto exit;
+ }
/* Set the suffix subflow group attribute. */
sfx_attr.group = sfx_attr.transfer ?
- (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
- MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+ (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
+ MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark = dev_flow->handle->mark;
+ flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
ret = flow_create_split_metadata(dev, flow,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no memory to split "
"sample flow");
- /* The representor_id is -1 for uplink. */
- fdb_tx = (attr->transfer && priv->representor_id != -1);
+ /* The representor_id is UINT16_MAX for uplink. */
+ fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX);
/*
* When reg_c_preserve is set, metadata registers Cx preserve
* their value even through packet duplication.
struct mlx5_flow_tbl_data_entry,
tbl);
sfx_attr.group = sfx_attr.transfer ?
- (sfx_tbl_data->table_id - 1) :
- sfx_tbl_data->table_id;
+ (sfx_tbl_data->level - 1) : sfx_tbl_data->level;
} else {
MLX5_ASSERT(attr->transfer);
sfx_attr.group = jump_table;
return ret;
}
-static struct mlx5_flow_tunnel *
-flow_tunnel_from_rule(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[])
+static inline struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(const struct mlx5_flow *flow)
{
struct mlx5_flow_tunnel *tunnel;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
- if (is_flow_tunnel_match_rule(dev, attr, items, actions))
- tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
- else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
- tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
- else
- tunnel = NULL;
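+ /* Cast away constness: the tunnel object referenced by the flow is mutable PMD state. */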
+ tunnel = (typeof(tunnel))flow->tunnel;
#pragma GCC diagnostic pop
return tunnel;
* A flow index on success, 0 otherwise and rte_errno is set.
*/
static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action original_actions[],
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
const struct rte_flow_action_rss *rss = NULL;
- struct mlx5_translated_shared_action
- shared_actions[MLX5_MAX_SHARED_ACTIONS];
- int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
+ struct mlx5_translated_action_handle
+ indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
+ int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
uint8_t buffer[2048];
.skip_scale = 0,
.flow_idx = 0,
.prefix_mark = 0,
- .prefix_layers = 0
+ .prefix_layers = 0,
+ .table_id = 0
};
int ret;
MLX5_ASSERT(wks);
rss_desc = &wks->rss_desc;
- ret = flow_shared_actions_translate(dev, original_actions,
- shared_actions,
- &shared_actions_n,
+ ret = flow_action_handles_translate(dev, original_actions,
+ indir_actions,
+ &indir_actions_n,
&translated_actions, error);
if (ret < 0) {
MLX5_ASSERT(translated_actions == NULL);
external, hairpin_flow, error);
if (ret < 0)
goto error_before_hairpin_split;
- flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+ flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
if (!flow) {
rte_errno = ENOMEM;
goto error_before_hairpin_split;
memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
/* RSS Action only works on NIC RX domain */
if (attr->ingress && !attr->transfer)
- rss = flow_get_rss_action(p_actions_rx);
+ rss = flow_get_rss_action(dev, p_actions_rx);
if (rss) {
if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
return 0;
mlx5_support_expansion, graph_root);
MLX5_ASSERT(ret > 0 &&
(unsigned int)ret < sizeof(expand_buffer.buffer));
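+ /* With debug logging enabled, print each pattern produced by the RSS expansion. */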
+ if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
+ for (i = 0; i < buf->entries; ++i)
+ mlx5_dbg__print_pattern(buf->entry[i].pattern);
+ }
} else {
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
- rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
- shared_actions_n);
+ rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
+ indir_actions_n);
for (i = 0; i < buf->entries; ++i) {
/* Initialize flow split data. */
flow_split_info.prefix_layers = 0;
error);
if (ret < 0)
goto error;
- if (is_flow_tunnel_steer_rule(dev, attr,
- buf->entry[i].pattern,
- p_actions_rx)) {
+ if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
ret = flow_tunnel_add_default_miss(dev, flow, attr,
p_actions_rx,
idx,
+ wks->flows[0].tunnel,
&default_miss_ctx,
error);
if (ret < 0) {
*/
if (external || dev->data->dev_started ||
(attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
- attr->priority == MLX5_FLOW_PRIO_RSVD)) {
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
}
- if (list) {
- rte_spinlock_lock(&priv->flow_list_lock);
- ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
- flow, next);
- rte_spinlock_unlock(&priv->flow_list_lock);
- }
+ flow->type = type;
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
- tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+ tunnel = flow_tunnel_from_rule(wks->flows);
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
+ mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
rte_errno = ret;
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
- return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+ return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, &pattern,
actions, false, &error);
}
struct rte_flow_error *error)
{
int hairpin_flow;
- struct mlx5_translated_shared_action
- shared_actions[MLX5_MAX_SHARED_ACTIONS];
- int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
+ struct mlx5_translated_action_handle
+ indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
+ int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
const struct rte_flow_action *actions;
struct rte_flow_action *translated_actions = NULL;
- int ret = flow_shared_actions_translate(dev, original_actions,
- shared_actions,
- &shared_actions_n,
+ int ret = flow_action_handles_translate(dev, original_actions,
+ indir_actions,
+ &indir_actions_n,
&translated_actions, error);
if (ret)
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
/*
* If the device is not started yet, it is not allowed to create a
* flow from application. PMD default flows and traffic control flows
return NULL;
}
- return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
- attr, items, actions, true, error);
+ return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
+ attr, items, actions,
+ true, error);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list. If this parameter NULL,
- * there is no flow removal from the list. Be noted that as
- * flow is add to the indexed list, memory of the indexed
- * list points to maybe changed as flow destroyed.
* @param[in] flow_idx
* Index of flow to destroy.
*/
static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_RTE_FLOW], flow_idx);
+ struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
if (!flow)
return;
+ MLX5_ASSERT(flow->type == type);
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
flow_drv_destroy(dev, flow);
- if (list) {
- rte_spinlock_lock(&priv->flow_list_lock);
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
- flow_idx, flow, next);
- rte_spinlock_unlock(&priv->flow_list_lock);
- }
if (flow->tunnel) {
struct mlx5_flow_tunnel *tunnel;
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
+ mlx5_ipool_free(priv->flows[type], flow_idx);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list.
+ * @param type
+ * Flow type to be flushed.
* @param active
* If flushing is called actively.
*/
void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ bool active)
{
- uint32_t num_flushed = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t num_flushed = 0, fidx = 1;
+ struct rte_flow *flow;
- while (*list) {
- flow_list_destroy(dev, list, *list);
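+ /* Walk every flow in the per-type indexed pool; ipool indexes start from 1. */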
+ MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
+ flow_list_destroy(dev, type, fidx);
num_flushed++;
}
if (active) {
* @return the number of flows not released.
*/
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
- uint32_t idx;
- int ret = 0;
+ uint32_t idx = 0;
+ int ret = 0, i;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
- flow, next) {
- DRV_LOG(DEBUG, "port %u flow %p still referenced",
- dev->data->port_id, (void *)flow);
- ++ret;
+ for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+ MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
+ ret++;
+ }
}
return ret;
}
mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
uint32_t queue)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.egress = 1,
.priority = 0,
actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
actions[0].conf = &jump;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
- flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions, false, &error);
if (!flow_idx) {
DRV_LOG(DEBUG,
"Failed to create ctrl flow: rte_errno(%d),"
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
};
struct rte_flow_item items[] = {
{
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions, false, &error);
if (!flow_idx)
return -rte_errno;
return 0;
int
mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
/*
* The LACP matching is done by only using ether type since using
* a multicast dst mac causes kernel to give low priority to this flow.
},
};
struct rte_flow_error error;
- uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions,
+ false, &error);
if (!flow_idx)
return -rte_errno;
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+ (uintptr_t)(void *)flow);
return 0;
}
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
return 0;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_driver_ops *fops;
- struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_RTE_FLOW],
+ struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
flow_idx);
enum mlx5_flow_drv_type ftype;
}
/**
- * Manage filter operations.
+ * Get rte_flow callbacks.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
+ * @param ops
* Location for the returned rte_flow ops pointer.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @return 0
*/
int
-mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- *(const void **)arg = &mlx5_flow_ops;
- return 0;
- default:
- DRV_LOG(ERR, "port %u filter type (%d) not supported",
- dev->data->port_id, filter_type);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
+mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &mlx5_flow_ops;
return 0;
}
/**
- * Create the needed meter and suffix tables.
+ * Validate meter policy actions.
+ * Dispatcher for action type specific validation.
*
* @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] fm
- * Pointer to the flow meter.
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The meter policy action object to validate.
+ * @param[in] attr
+ * Attributes of flow to determine steering domain.
+ * @param[out] is_rss
+ * Is RSS or not.
+ * @param[out] domain_bitmap
+ * Domain bitmap.
+ * @param[out] is_def_policy
+ * Is default policy or not.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * Pointer to table set on success, NULL otherwise.
+ * 0 on success, otherwise negative errno value.
*/
-struct mlx5_meter_domains_infos *
-mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
- const struct mlx5_flow_meter *fm)
+int
+mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ uint8_t *policy_mode,
+ struct rte_mtr_error *error)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->create_mtr_tbls(dev, fm);
+ return fops->validate_mtr_acts(dev, actions, attr, is_rss,
+ domain_bitmap, policy_mode, error);
}
/**
*
* @param[in] dev
* Pointer to Ethernet device.
- * @param[in] tbl
- * Pointer to the meter table set.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+void
+mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_mtr_acts(dev, mtr_policy);
+}
+
+/**
+ * Create policy action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ * @param[in] action
+ * Action specification used to create meter actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 0 on success.
+ * 0 on success, otherwise negative errno value.
*/
int
-mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
- struct mlx5_meter_domains_infos *tbls)
+mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->destroy_mtr_tbls(dev, tbls);
+ return fops->create_mtr_acts(dev, mtr_policy, actions, error);
}
/**
- * Create policer rules.
+ * Create policy rules, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_policy_rules(dev, mtr_policy);
+}
+
+/**
+ * Destroy policy rules, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+void
+mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_policy_rules(dev, mtr_policy);
+}
+
+/**
+ * Destroy the default policy table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_def_policy(dev);
+}
+
+/**
+ * Create the default policy table set.
*
* @param[in] dev
* Pointer to Ethernet device.
- * @param[in] fm
- * Pointer to flow meter structure.
- * @param[in] attr
- * Pointer to flow attributes.
*
* @return
* 0 on success, -1 otherwise.
*/
int
-mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
- struct mlx5_flow_meter *fm,
- const struct rte_flow_attr *attr)
+mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->create_policer_rules(dev, fm, attr);
+ return fops->create_def_policy(dev);
}
/**
- * Destroy policer rules.
+ * Create the needed meter and suffix tables.
*
- * @param[in] fm
- * Pointer to flow meter structure.
- * @param[in] attr
- * Pointer to flow attributes.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, -1 otherwise.
*/
int
-mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
- struct mlx5_flow_meter *fm,
- const struct rte_flow_attr *attr)
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm,
+ uint32_t mtr_idx,
+ uint8_t domain_bitmap)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->destroy_policer_rules(dev, fm, attr);
+ return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
+}
+
+/**
+ * Destroy the meter table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] fm
+ * Pointer to the flow meter structure.
+ */
+void
+mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_mtr_tbls(dev, fm);
+}
+
+/**
+ * Destroy the global meter drop table.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_mtr_drop_tbls(dev);
+}
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+void
+mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
+}
+
+/**
+ * Allocate the needed ASO flow meter ID.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Index to the ASO flow meter on success, 0 otherwise.
+ */
+uint32_t
+mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_meter(dev);
+}
+
+/**
+ * Free the ASO flow meter ID.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_idx
+ * Index to the ASO flow meter to be freed.
+ */
+void
+mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->free_meter(dev, mtr_idx);
}
/**
mlx5_free(mem);
return -rte_errno;
}
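+ /* Zero all mkey attributes first; only the used fields are set below. */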
+ memset(&mkey_attr, 0, sizeof(mkey_attr));
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
mkey_attr.pd = sh->pdn;
- mkey_attr.log_entity_size = 0;
- mkey_attr.pg_access = 0;
- mkey_attr.klm_array = NULL;
- mkey_attr.klm_num = 0;
mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
.ingress = 1,
};
struct rte_flow_item items[] = {
if (!config->dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
- flow_idx = flow_list_create(dev, NULL, &attr, items,
- actions, false, &error);
- flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
+ items, actions, false, &error);
+ flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
flow_idx);
if (!flow)
continue;
config->flow_mreg_c[n++] = idx;
- flow_list_destroy(dev, NULL, flow_idx);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
config->flow_mreg_c[n] = REG_NON;
return 0;
}
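+/**
+ * Save one PMD dump record as a single text line in the dump file:
+ * "type,id,actions_num,hex-data" for modify headers, "type,id,hex-data"
+ * for packet reformats and "type,id,hits,bytes" for counters.
+ */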
+int
+save_dump_file(const uint8_t *data, uint32_t size,
+ uint32_t type, uint32_t id, void *arg, FILE *file)
+{
+ char line[BUF_SIZE];
+ uint32_t out = 0;
+ uint32_t k;
+ uint32_t actions_num;
+ struct rte_flow_query_count *count;
+
+ memset(line, 0, BUF_SIZE);
+ switch (type) {
+ case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
+ actions_num = *(uint32_t *)(arg);
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+ type, id, actions_num);
+ break;
+ case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+ type, id);
+ break;
+ case DR_DUMP_REC_TYPE_PMD_COUNTER:
+ count = (struct rte_flow_query_count *)arg;
+ fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
+ id, count->hits, count->bytes);
+ return 0;
+ default:
+ return -1;
+ }
+
+ for (k = 0; k < size; k++) {
+ /* Make sure we do not overrun the line buffer length. */
+ if (out >= BUF_SIZE - 4) {
+ line[out] = '\0';
+ break;
+ }
+ out += snprintf(line + out, BUF_SIZE - out, "%02x",
+ (data[k]) & 0xff);
+ }
+ fprintf(file, "%s\n", line);
+ return 0;
+}
+
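+/**
+ * Query the counter attached to a flow, if any.
+ *
+ * @return
+ * 0 on success, -1 when no counter is attached to the flow,
+ * a negative errno value on driver query failure.
+ */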
+int
+mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_query_count *count, struct rte_flow_error *error)
+{
+ struct rte_flow_action action[2];
+ enum mlx5_flow_drv_type ftype;
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (!flow) {
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
+ }
+ action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ if (flow->counter) {
+ memset(count, 0, sizeof(struct rte_flow_query_count));
+ ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
+ MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
+ ftype < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(ftype);
+ return fops->query(dev, flow, action, count, error);
+ }
+ return -1;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Dump flow ipool data to file.
+ *
+ * @param[in] dev
+ * The pointer to Ethernet device.
+ * @param[in] file
+ * A pointer to a file for output.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
+ struct rte_flow *flow, FILE *file,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+ struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dh;
+ struct rte_flow_query_count count;
+ uint32_t actions_num;
+ const uint8_t *data;
+ size_t size;
+ uint32_t id;
+ uint32_t type;
+
+ if (!flow) {
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
+ }
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+ /* A missing handle leaves handle_idx unchanged; stop to avoid looping forever. */
+ if (!dh)
+ break;
+ handle_idx = dh->next.next;
+ id = (uint32_t)(uintptr_t)dh->drv_flow;
+
+ /* query counter */
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ if (!mlx5_flow_query_counter(dev, flow, &count, error))
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+
+ /* Get modify_hdr and encap_decap buf from ipools. */
+ encap_decap = NULL;
+ modify_hdr = dh->dvh.modify_hdr;
+
+ if (dh->dvh.rix_encap_decap) {
+ encap_decap = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_DECAP_ENCAP],
+ dh->dvh.rix_encap_decap);
+ }
+ if (modify_hdr) {
+ data = (const uint8_t *)modify_hdr->actions;
+ size = (size_t)(modify_hdr->actions_num) * 8;
+ actions_num = modify_hdr->actions_num;
+ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+ save_dump_file(data, size, type, id,
+ (void *)(&actions_num), file);
+ }
+ if (encap_decap) {
+ data = encap_decap->buf;
+ size = encap_decap->size;
+ type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+ save_dump_file(data, size, type,
+ id, NULL, file);
+ }
+ }
+ return 0;
+}
+#endif
+
/**
* Dump flow raw hw data to file
*
* 0 on success, a negative value otherwise.
*/
int
-mlx5_flow_dev_dump(struct rte_eth_dev *dev,
+mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
FILE *file,
struct rte_flow_error *error __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
+ uint32_t handle_idx;
+ int ret;
+ struct mlx5_flow_handle *dh;
+ struct rte_flow *flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ uint32_t idx;
+#endif
if (!priv->config.dv_flow_en) {
if (fputs("device dv flow disabled\n", file) <= 0)
return -errno;
return -ENOTSUP;
}
- return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
- sh->tx_domain, file);
+
+ /* dump all */
+ if (!flow_idx) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
+ mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
+ return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
+ sh->rx_domain,
+ sh->tx_domain, file);
+ }
+ /* dump one */
+ flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
+ (uintptr_t)(void *)flow_idx);
+ if (!flow)
+ return -ENOENT;
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ handle_idx);
+ if (!dh)
+ return -ENOENT;
+ if (dh->drv_flow) {
+ ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
+ file);
+ if (ret)
+ return -ENOENT;
+ }
+ handle_idx = dh->next.next;
+ }
+ return 0;
}
/**
/* Wrapper for driver action_validate op callback */
static int
flow_drv_action_validate(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
const struct mlx5_flow_driver_ops *fops,
struct rte_flow_error *error)
{
- static const char err_msg[] = "shared action validation unsupported";
+ static const char err_msg[] = "indirect action validation unsupported";
if (!fops->action_validate) {
DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
*
* @param dev
* Pointer to Ethernet device structure.
- * @param[in] action
- * Handle for the shared action to be destroyed.
+ * @param[in] handle
+ * Handle for the indirect action object to be destroyed.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @note: wrapper for driver action_create op callback.
*/
static int
-mlx5_shared_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
+mlx5_action_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
{
- static const char err_msg[] = "shared action destruction unsupported";
+ static const char err_msg[] = "indirect action destruction unsupported";
struct rte_flow_attr attr = { .transfer = 0 };
const struct mlx5_flow_driver_ops *fops =
flow_get_drv_ops(flow_get_drv_type(dev, &attr));
NULL, err_msg);
return -rte_errno;
}
- return fops->action_destroy(dev, action, error);
+ return fops->action_destroy(dev, handle, error);
}
/* Wrapper for driver action_destroy op callback */
static int
flow_drv_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- const void *action_conf,
+ struct rte_flow_action_handle *handle,
+ const void *update,
const struct mlx5_flow_driver_ops *fops,
struct rte_flow_error *error)
{
- static const char err_msg[] = "shared action update unsupported";
+ static const char err_msg[] = "indirect action update unsupported";
if (!fops->action_update) {
DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
NULL, err_msg);
return -rte_errno;
}
- return fops->action_update(dev, action, action_conf, error);
+ return fops->action_update(dev, handle, update, error);
}
/* Wrapper for driver action_destroy op callback */
static int
flow_drv_action_query(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action *action,
+ const struct rte_flow_action_handle *handle,
void *data,
const struct mlx5_flow_driver_ops *fops,
struct rte_flow_error *error)
{
- static const char err_msg[] = "shared action query unsupported";
+ static const char err_msg[] = "indirect action query unsupported";
if (!fops->action_query) {
DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
NULL, err_msg);
return -rte_errno;
}
- return fops->action_query(dev, action, data, error);
+ return fops->action_query(dev, handle, data, error);
}
/**
- * Create shared action for reuse in multiple flow rules.
+ * Create indirect action for reuse in multiple flow rules.
*
* @param dev
* Pointer to Ethernet device structure.
+ * @param conf
+ * Pointer to indirect action object configuration.
* @param[in] action
- * Action configuration for shared action creation.
+ * Action configuration for indirect action object creation.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
* A valid handle in case of success, NULL otherwise and rte_errno is set.
*/
-static struct rte_flow_shared_action *
-mlx5_shared_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+static struct rte_flow_action_handle *
+mlx5_action_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
- static const char err_msg[] = "shared action creation unsupported";
+ static const char err_msg[] = "indirect action creation unsupported";
struct rte_flow_attr attr = { .transfer = 0 };
const struct mlx5_flow_driver_ops *fops =
flow_get_drv_ops(flow_get_drv_type(dev, &attr));
}
/**
- * Updates inplace the shared action configuration pointed by *action* handle
- * with the configuration provided as *action* argument.
- * The update of the shared action configuration effects all flow rules reusing
- * the action via handle.
+ * Updates inplace the indirect action configuration pointed by *handle*
+ * with the configuration provided as *update* argument.
+ * The update of the indirect action configuration effects all flow rules
+ * reusing the action via handle.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param[in] shared_action
- * Handle for the shared action to be updated.
- * @param[in] action
+ * @param[in] handle
+ * Handle for the indirect action to be updated.
+ * @param[in] update
* Action specification used to modify the action pointed by handle.
- * *action* should be of same type with the action pointed by the *action*
- * handle argument, otherwise considered as invalid.
+ * *update* could be of same type with the action pointed by the *handle*
+ * handle argument, or some other structures like a wrapper, depending on
+ * the indirect action type.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_shared_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *shared_action,
- const struct rte_flow_action *action,
+mlx5_action_handle_update(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *handle,
+ const void *update,
struct rte_flow_error *error)
{
struct rte_flow_attr attr = { .transfer = 0 };
flow_get_drv_ops(flow_get_drv_type(dev, &attr));
int ret;
- ret = flow_drv_action_validate(dev, NULL, action, fops, error);
+ ret = flow_drv_action_validate(dev, NULL,
+ (const struct rte_flow_action *)update, fops, error);
if (ret)
return ret;
- return flow_drv_action_update(dev, shared_action, action->conf, fops,
+ return flow_drv_action_update(dev, handle, update, fops,
error);
}
/**
- * Query the shared action by handle.
+ * Query the indirect action by handle.
*
* This function allows retrieving action-specific data such as counters.
* Data is gathered by special action which may be present/referenced in
* more than one flow rule definition.
*
- * \see RTE_FLOW_ACTION_TYPE_COUNT
+ * @see RTE_FLOW_ACTION_TYPE_COUNT
*
* @param dev
* Pointer to Ethernet device structure.
- * @param[in] action
- * Handle for the shared action to query.
+ * @param[in] handle
+ * Handle for the indirect action to query.
* @param[in, out] data
* Pointer to storage for the associated query data type.
* @param[out] error
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_shared_action_query(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action *action,
+mlx5_action_handle_query(struct rte_eth_dev *dev,
+ const struct rte_flow_action_handle *handle,
void *data,
struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops =
flow_get_drv_ops(flow_get_drv_type(dev, &attr));
- return flow_drv_action_query(dev, action, data, fops, error);
+ return flow_drv_action_query(dev, handle, data, fops, error);
}
/**
- * Destroy all shared actions.
+ * Destroy all indirect actions (shared RSS).
*
* @param dev
* Pointer to Ethernet device.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_shared_action_flush(struct rte_eth_dev *dev)
+mlx5_action_handle_flush(struct rte_eth_dev *dev)
{
struct rte_flow_error error;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_action_rss *action;
+ struct mlx5_shared_action_rss *shared_rss;
int ret = 0;
uint32_t idx;
ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- priv->rss_shared_actions, idx, action, next) {
- ret |= mlx5_shared_action_destroy(dev,
- (struct rte_flow_shared_action *)(uintptr_t)idx, &error);
+ priv->rss_shared_actions, idx, shared_rss, next) {
+ ret |= mlx5_action_handle_destroy(dev,
+ (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
}
return ret;
}
return ret;
}
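+/**
+ * Get the tunnel-offload object a rule refers to and classify the rule
+ * type: the PMD private TUNNEL item marks a tunnel match rule, the PMD
+ * private TUNNEL_SET action marks a tunnel set (steer) rule.
+ */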
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *item,
+ const struct rte_flow_action *action,
+ enum mlx5_tof_rule_type *rule_type)
+{
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == (typeof(item->type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+ *rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
+ return flow_items_to_tunnel(item);
+ }
+ }
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == (typeof(action->type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+ *rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
+ return flow_actions_to_tunnel(action);
+ }
+ }
+ return NULL;
+}
+
/**
* tunnel offload functionalilty is defined for DV environment only
*/
const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
+ const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow *dev_flow;
struct rte_flow_attr miss_attr = *attr;
- const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
const struct rte_flow_item miss_items[2] = {
{
.type = RTE_FLOW_ITEM_TYPE_ETH,
dev_flow->flow = flow;
dev_flow->external = true;
dev_flow->tunnel = tunnel;
+ dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
/* Subflow object was created, we must include one in the list. */
SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
dev_flow->handle, next);
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
union tunnel_offload_mark mbits = { .val = mark };
union mlx5_flow_tbl_key table_key = {
{
- .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+ .level = tunnel_id_to_flow_tbl(mbits.table_id),
+ .id = 0,
+ .reserved = 0,
.dummy = 0,
- .domain = !!mbits.transfer,
- .direction = 0,
+ .is_fdb = !!mbits.transfer,
+ .is_egress = 0,
}
};
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &table_key.v64,
+ };
+
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
return he ?
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}
static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
}
static int
-mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx __rte_unused)
+mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
union tunnel_tbl_key tbl = {
- .val = key,
+ .val = *(uint64_t *)(ctx->data),
};
struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *ctx __rte_unused)
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key tbl = {
- .val = key,
+ .val = *(uint64_t *)(ctx->data),
};
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
return NULL;
}
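+/* Clone callbacks let the generic mlx5 hash list duplicate an entry, e.g. for per-lcore caches. */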
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx __rte_unused)
+{
+ struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
+ 0, SOCKET_ID_ANY);
+
+ if (!tte)
+ return NULL;
+ memcpy(tte, oentry, sizeof(*tte));
+ return &tte->hash;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ mlx5_free(tte);
+}
+
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
struct rte_flow_error *error)
{
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key key = {
.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
};
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_hlist *group_hash;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &key.val,
+ };
group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_register(group_hash, key.val, NULL);
+ he = mlx5_hlist_register(group_hash, key.val, &ctx);
if (!he)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
return NULL;
}
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
+ priv->sh,
mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!tunnel->groups) {
mlx5_ipool_free(ipool, id);
return NULL;
}
- tunnel->groups->ctx = priv->sh;
/* initiate new PMD tunnel */
memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
tunnel->tunnel_id = id;
rte_spinlock_unlock(&thub->sl);
ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
- ctx->tunnel->refctn = 1;
rte_spinlock_lock(&thub->sl);
- if (ctx->tunnel)
+ if (ctx->tunnel) {
+ ctx->tunnel->refctn = 1;
LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+ }
}
if (!thub)
return;
if (!LIST_EMPTY(&thub->tunnels))
- DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
+ DRV_LOG(WARNING, "port %u tunnels present", port_id);
mlx5_hlist_destroy(thub->groups);
mlx5_free(thub);
}
return -ENOMEM;
LIST_INIT(&thub->tunnels);
rte_spinlock_init(&thub->sl);
- thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
- 0, mlx5_flow_tunnel_grp2tbl_create_cb,
+ thub->groups = mlx5_hlist_create("flow groups", 64,
+ false, true, sh,
+ mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!thub->groups) {
err = -rte_errno;
goto err;
}
- thub->groups->ctx = sh;
sh->tunnel_hub = thub;
return 0;
__rte_unused const struct rte_flow_attr *attr,
__rte_unused const struct rte_flow_action *actions,
__rte_unused uint32_t flow_idx,
+ __rte_unused const struct mlx5_flow_tunnel *tunnel,
__rte_unused struct tunnel_default_miss_ctx *ctx,
__rte_unused struct rte_flow_error *error)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
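+/* Debug helper: print the type names of a flow pattern's items to stdout. */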
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item)
+{
+ int ret;
+ struct rte_flow_error error;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ char *item_name;
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
+ sizeof(item_name),
+ (void *)(uintptr_t)item->type, &error);
+ if (ret > 0)
+ printf("%s ", item_name);
+ else
+ printf("%d\n", (int)item->type);
+ }
+ printf("END\n");
+}