.flush = mlx5_flow_flush,
.isolate = mlx5_flow_isolate,
.query = mlx5_flow_query,
+ .dev_dump = mlx5_flow_dev_dump,
};
/* Convert FDIR request to Generic flow. */
.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
.ptype = RTE_PTYPE_TUNNEL_IP,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_GTP,
+ .ptype = RTE_PTYPE_TUNNEL_GTPU,
+ },
};
/**
case MLX5_METADATA_TX:
return REG_A;
case MLX5_METADATA_FDB:
- return REG_C_0;
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_NONE;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_0;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_1;
+ }
+ break;
case MLX5_FLOW_MARK:
switch (config->dv_xmeta_en) {
case MLX5_XMETA_MODE_LEGACY:
&rss->types,
"some RSS protocols are not"
" supported");
+ if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
+ !(rss->types & ETH_RSS_IP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "L3 partial RSS requested but L3 RSS"
+ " type not specified");
+ if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
+ !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "L4 partial RSS requested but L4 RSS"
+ " type not specified");
if (!priv->rxqs_n)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
for (i = 0; i != rss->queue_num; ++i) {
+ if (rss->queue[i] >= priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i], "queue index out of range");
if (!(*priv->rxqs)[rss->queue[i]])
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
const rte_be32_t *mask = item->mask;
int ret = 0;
rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
- const struct rte_flow_item_gre *gre_spec = gre_item->spec;
- const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+ const struct rte_flow_item_gre *gre_spec;
+ const struct rte_flow_item_gre *gre_mask;
if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
return rte_flow_error_set(error, ENOTSUP,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GRE key following a wrong item");
+ gre_mask = gre_item->mask;
if (!gre_mask)
gre_mask = &rte_flow_item_gre_mask;
+ gre_spec = gre_item->spec;
if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
!(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
return rte_flow_error_set(error, EINVAL,
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
queue = actions->conf;
+ if (queue == NULL)
+ return 0;
if (mlx5_rxq_get_type(dev, queue->index) !=
MLX5_RXQ_TYPE_HAIRPIN)
return 0;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = actions->conf;
+ if (rss == NULL || rss->queue_num == 0)
+ return 0;
if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
MLX5_RXQ_TYPE_HAIRPIN)
return 0;
mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
if (mcp_res) {
/* For non-default rule. */
- if (mark_id)
+ if (mark_id != MLX5_DEFAULT_COPY_ID)
mcp_res->refcnt++;
- assert(mark_id || mcp_res->refcnt == 1);
+ assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
return mcp_res;
}
/* Provide the full width of FLAG specific value. */
if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
/* Build a new flow. */
- if (mark_id) {
+ if (mark_id != MLX5_DEFAULT_COPY_ID) {
items[0] = (struct rte_flow_item){
.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
.spec = &tag_spec,
}
/*
* We do not check availability of metadata registers here,
- * because copy resources are allocated in this case.
+ * because copy resources are not allocated in this case.
*/
if (--mcp_res->refcnt)
return;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL);
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
+ MLX5_DEFAULT_COPY_ID);
if (!mcp_res)
return;
assert(mcp_res->flow);
!mlx5_flow_ext_mreg_supported(dev) ||
!priv->sh->dv_regc0_mask)
return 0;
- mcp_res = flow_mreg_add_copy_action(dev, 0, error);
+ mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
if (!mcp_res)
return -rte_errno;
return 0;
},
};
uint64_t hash_fields = dev_flow->hash_fields;
- dev_flow = NULL;
+
/*
- * Configure the tag action only if we are not the meter sub
- * flow. Since tag is already marked in the meter suffix sub
- * flow.
+ * Configure the tag item only if there is no meter subflow.
+ * Since tag is already marked in the meter suffix subflow
+ * we can just use the meter suffix items as is.
*/
if (qrss_id) {
+ /* Not meter subflow. */
+ assert(!mtr_sfx);
/*
* Put unique id in prefix flow due to it is destroyed
- * after prefix flow and id will be freed after there
+ * after suffix flow and id will be freed after there
* is no actual flows with this id and identifier
* reallocation becomes possible (for example, for
* other flows in other threads).
goto exit;
q_tag_spec.id = ret;
}
+ dev_flow = NULL;
/* Add suffix subflow to execute Q/RSS. */
ret = flow_create_split_inner(dev, flow, &dev_flow,
&q_attr, mtr_sfx ? items :
sfx_items++;
}
sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
- sfx_items -= METER_SUFFIX_ITEM;
+ sfx_items -= sfx_port_id_item ? 2 : 1;
/* Setting the sfx group atrr. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
config->flow_mreg_c[n] = REG_NONE;
return 0;
}
+
+
+/**
+ * Dump flow raw hardware data to a file.
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only. Currently unused: the dump is
+ *   delegated to the DevX command layer, which reports failure via its
+ *   negative return value only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump(struct rte_eth_dev *dev,
+		FILE *file,
+		struct rte_flow_error *error __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	return mlx5_devx_cmd_flow_dump(priv->sh, file);
+}