return config->flow_mreg_c[2] != REG_NON;
}
+/**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to device flow rule attributes.
+ *
+ * @return
+ * The value of lowest priority of flow.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!attr->group && !attr->transfer) /* Root table (group 0, non-transfer). */
+ return priv->config.flow_prio - 2;
+ return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate matcher priority of the flow.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ * The priority based on the items.
+ * @return
+ * The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint32_t subpriority)
+{
+ uint16_t priority = (uint16_t)attr->priority;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!attr->group && !attr->transfer) {
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = priv->config.flow_prio - 1; /* Map indicator to lowest root priority. */
+ return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+ }
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
+ return priority * 3 + subpriority; /* NOTE(review): assumes 3 sub-priority slots per priority level — confirm against matcher layout. */
+}
+
/**
* Verify the @p item specifications (spec, last, mask) are compatible with the
* NIC capabilities.
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL, "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
};
} else {
/* Default rule, wildcard match. */
- attr.priority = MLX5_FLOW_PRIO_RSVD;
+ attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_END,
};
*/
if (external || dev->data->dev_started ||
(attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
- attr->priority == MLX5_FLOW_PRIO_RSVD)) {
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
};
struct rte_flow_item items[] = {
{
for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
.ingress = 1,
};
struct rte_flow_item items[] = {
{
struct rte_flow_error error;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_action_rss *action;
+ struct mlx5_shared_action_rss *shared_rss;
int ret = 0;
uint32_t idx;
ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- priv->rss_shared_actions, idx, action, next) {
+ priv->rss_shared_actions, idx, shared_rss, next) {
ret |= mlx5_shared_action_destroy(dev,
(struct rte_flow_shared_action *)(uintptr_t)idx, &error);
}
return -ENOMEM;
LIST_INIT(&thub->tunnels);
rte_spinlock_init(&thub->sl);
- thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
+ thub->groups = mlx5_hlist_create("flow groups",
+ rte_align32pow2(MLX5_MAX_TABLES), 0,
0, mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);