else
ret = RTE_FLOW_ITEM_TYPE_END;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ ether_type_m = item->mask ?
+ ((const struct rte_flow_item_geneve *)
+ (item->mask))->protocol :
+ rte_flow_item_geneve_mask.protocol;
+ ether_type = ((const struct rte_flow_item_geneve *)
+ (item->spec))->protocol;
+ ether_type_m = rte_be_to_cpu_16(ether_type_m);
+ ether_type = rte_be_to_cpu_16(ether_type);
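+ /*
+ * Select the inner item type from the masked protocol value;
+ * item->spec is presumed already validated as non-NULL at this point.
+ */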
+ switch (ether_type_m & ether_type) {
+ case RTE_ETHER_TYPE_TEB:
+ ret = RTE_FLOW_ITEM_TYPE_ETH;
+ break;
+ case RTE_ETHER_TYPE_IPV4:
+ ret = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ ret = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ ret = RTE_FLOW_ITEM_TYPE_END;
+ }
+ break;
default:
ret = RTE_FLOW_ITEM_TYPE_VOID;
break;
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
MLX5_EXPANSION_IPV6_FRAG_EXT,
- MLX5_EXPANSION_GTP
+ MLX5_EXPANSION_GTP,
+ MLX5_EXPANSION_GENEVE,
};
/** Supported expansion of items. */
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GENEVE,
MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GENEVE,
MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
},
[MLX5_EXPANSION_GRE] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_GRE_KEY,
MLX5_EXPANSION_MPLS),
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_GTP,
},
+ [MLX5_EXPANSION_GENEVE] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ },
};
static struct rte_flow_action_handle *
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
.tunnel_item_release = mlx5_flow_tunnel_item_release,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+ .flex_item_create = mlx5_flow_flex_item_create,
+ .flex_item_release = mlx5_flow_flex_item_release,
};
/* Tunnel information. */
case MLX5_MTR_COLOR:
case MLX5_ASO_FLOW_HIT:
case MLX5_ASO_CONNTRACK:
+ case MLX5_SAMPLE_ID:
/* All features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
return;
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
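+ /* Assert in debug builds; skip an unconfigured queue gracefully in release. */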
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
/*
* To support metadata register copy on Tx loopback,
* this must always be enabled (metadata may arrive
MLX5_ASSERT(dev->data->dev_started);
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
if (priv->config.dv_flow_en &&
priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev)) {
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
unsigned int j;
- if (!(*priv->rxqs)[i])
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq_ctrl = container_of((*priv->rxqs)[i],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
+ rxq->ctrl->flow_mark_n = 0;
+ rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
+ rxq->ctrl->flow_tunnels_n[j] = 0;
+ rxq->ctrl->rxq.tunnel = 0;
}
}
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_data *data;
+
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- data = (*priv->rxqs)[i];
+ data = &rxq->ctrl->rxq;
if (!rte_flow_dynf_metadata_avail()) {
data->dynf_meta = 0;
data->flow_meta_mask = 0;
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
"queue index out of range");
- if (!(*priv->rxqs)[queue->index])
+ if (mlx5_rxq_get(dev, queue->index) == NULL)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
return 0;
}
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ * Configured device.
+ * @param[in] queues
+ * Array of queue numbers.
+ * @param[in] queues_n
+ * Size of the @p queues array.
+ * @param[out] error
+ * On error, filled with a textual error description.
+ * @param[out] queue_idx
+ *   On error, filled with the index of the offending queue in @p queues array.
+ *
+ * @return
+ * 0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
+ const uint16_t *queues, uint32_t queues_n,
+ const char **error, uint32_t *queue_idx)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+ uint32_t i;
+
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (queues[i] >= priv->rxqs_n) {
+ *error = "queue index out of range";
+ *queue_idx = i;
+ return -EINVAL;
+ }
+ rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
+ if (rxq_ctrl == NULL) {
+ *error = "queue is not configured";
+ *queue_idx = i;
+ return -EINVAL;
+ }
+ if (i == 0)
+ rxq_type = rxq_ctrl->type;
+ if (rxq_type != rxq_ctrl->type) {
+ *error = "combining hairpin and regular RSS queues is not supported";
+ *queue_idx = i;
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
/*
* Validate the rss action.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
- enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
- unsigned int i;
+ int ret;
+ const char *message;
+ uint32_t queue_idx;
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
- for (i = 0; i != rss->queue_num; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- if (rss->queue[i] >= priv->rxqs_n)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i], "queue index out of range");
- if (!(*priv->rxqs)[rss->queue[i]])
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i], "queue is not configured");
- rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
- struct mlx5_rxq_ctrl, rxq);
- if (i == 0)
- rxq_type = rxq_ctrl->type;
- if (rxq_type != rxq_ctrl->type)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i],
- "combining hairpin and regular RSS queues is not supported");
+ ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+ &message, &queue_idx);
+ if (ret != 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[queue_idx], message);
}
return 0;
}
* Pointer to the Q/RSS action.
* @param[in] actions_n
* Number of original actions.
+ * @param[in] mtr_sfx
+ *   Nonzero when the flow is in the meter suffix table.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
struct rte_flow_action *split_actions,
const struct rte_flow_action *actions,
const struct rte_flow_action *qrss,
- int actions_n, struct rte_flow_error *error)
+ int actions_n, int mtr_sfx,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
* - Add jump to mreg CP_TBL.
* As a result, there will be one more action.
*/
- ++actions_n;
memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+ /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
+ ++actions_n;
set_tag = (void *)(split_actions + actions_n);
/*
- * If tag action is not set to void(it means we are not the meter
- * suffix flow), add the tag action. Since meter suffix flow already
- * has the tag added.
+ * If we are not the meter suffix flow, add the tag action,
+ * since the meter suffix flow already has the tag added.
*/
- if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+ if (!mtr_sfx) {
/*
* Allocate the new subflow ID. This one is unique within
* device and not shared with representors. Otherwise,
MLX5_RTE_FLOW_ACTION_TYPE_TAG,
.conf = set_tag,
};
+ } else {
+ /*
+ * If we are the meter suffix flow, the tag already exists.
+ * Set the QUEUE/RSS action to void.
+ */
+ split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
}
/* JUMP action to jump to mreg copy table (CP_TBL). */
jump = (void *)(set_tag + 1);
/* Prepare the prefix tag action. */
append_index++;
set_tag = (void *)(actions_pre + actions_n + append_index);
- ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
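+ /* MLX5_SAMPLE_ID shares the meter color register (see mlx5_flow_get_reg_id). */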
+ ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
if (ret < 0)
return ret;
mlx5_ipool_malloc(priv->sh->ipool
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no memory to split "
"metadata flow");
- /*
- * If we are the suffix flow of meter, tag already exist.
- * Set the tag action to void.
- */
- if (mtr_sfx)
- ext_actions[qrss - actions].type =
- RTE_FLOW_ACTION_TYPE_VOID;
- else
- ext_actions[qrss - actions].type =
- (enum rte_flow_action_type)
- MLX5_RTE_FLOW_ACTION_TYPE_TAG;
/*
* Create the new actions list with removed Q/RSS action
* and appended set tag and jump to register copy table
* in advance, because it is needed for set tag action.
*/
qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
- qrss, actions_n, error);
+ qrss, actions_n,
+ mtr_sfx, error);
if (!mtr_sfx && !qrss_id) {
ret = -rte_errno;
goto exit;
/* Add suffix subflow to execute Q/RSS. */
flow_split_info->prefix_layers = layers;
flow_split_info->prefix_mark = 0;
+ flow_split_info->table_id = 0;
ret = flow_create_split_inner(dev, flow, &dev_flow,
&q_attr, mtr_sfx ? items :
q_items, q_actions,
static int
mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
sizeof(struct mlx5_counter_stats_mem_mng);
size_t pgsize = rte_mem_page_size();
uint8_t *mem;
+ int ret;
int i;
if (pgsize == (size_t)-1) {
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return -rte_errno;
- }
- memset(&mkey_attr, 0, sizeof(mkey_attr));
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->cdev->pdn;
- mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
- mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_os_umem_dereg(mem_mng->umem);
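+ /* Register the raw counter memory through one wrapped mkey. */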
+ ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
+ sh->cdev->pdn, mem, size,
+ &mem_mng->wm);
+ if (ret) {
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
MLX5_COUNTERS_PER_POOL,
NULL, NULL,
- pool->raw_hw->mem_mng->dm->id,
+ pool->raw_hw->mem_mng->wm.lkey,
(void *)(uintptr_t)
pool->raw_hw->data,
sh->devx_comp,
return ret;
}
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool =
+ priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+ struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+ int ret = 0;
+ uint32_t idx;
+
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ const char *message;
+ uint32_t queue_idx;
+
+ ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+ ind_tbl->queues_n,
+ &message, &queue_idx);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+ dev->data->port_id, ind_tbl->queues[queue_idx],
+ message);
+ break;
+ }
+ }
+ if (ret != 0)
+ return ret;
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not attach "
+ "indirection table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ shared_rss_last = shared_rss;
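+ /* Detach the entries attached before the failing one. */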
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ if (shared_rss == shared_rss_last)
+ break;
+ if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+ DRV_LOG(CRIT, "Port %u could not detach "
+ "indirection table obj %p on rollback",
+ dev->data->port_id, (void *)ind_tbl);
+ }
+ return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool =
+ priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+ struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+ int ret = 0;
+ uint32_t idx;
+
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not detach "
+ "indirection table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ shared_rss_last = shared_rss;
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ if (shared_rss == shared_rss_last)
+ break;
+ if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+ DRV_LOG(CRIT, "Port %u could not attach "
+ "indirection table obj %p on rollback",
+ dev->data->port_id, (void *)ind_tbl);
+ }
+ return ret;
+}
+
#ifndef HAVE_MLX5DV_DR
#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
#else
return err;
}
-static inline bool
+static inline int
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
struct rte_flow_tunnel *tunnel,
- const char *err_msg)
+ struct rte_flow_error *error)
{
- err_msg = NULL;
- if (!is_tunnel_offload_active(dev)) {
- err_msg = "tunnel offload was not activated";
- goto out;
- } else if (!tunnel) {
- err_msg = "no application tunnel";
- goto out;
- }
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->config.dv_flow_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "flow DV interface is off");
+ if (!is_tunnel_offload_active(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "tunnel offload was not activated");
+ if (!tunnel)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "no application tunnel");
switch (tunnel->type) {
default:
- err_msg = "unsupported tunnel type";
- goto out;
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "unsupported tunnel type");
case RTE_FLOW_ITEM_TYPE_VXLAN:
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_NVGRE:
case RTE_FLOW_ITEM_TYPE_GENEVE:
break;
}
-
-out:
- return !err_msg;
+ return 0;
}
static int
uint32_t *num_of_actions,
struct rte_flow_error *error)
{
- int ret;
struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+ int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- err_msg);
+ if (ret)
+ return ret;
ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
if (ret < 0) {
return rte_flow_error_set(error, ret,
uint32_t *num_of_items,
struct rte_flow_error *error)
{
- int ret;
struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+ int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- err_msg);
+ if (ret)
+ return ret;
ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
if (ret < 0) {
return rte_flow_error_set(error, ret,
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item creation unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_create) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return NULL;
+ }
+ return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item release unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_release) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->item_release(dev, handle, error);
+}
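+
+/*
+ * Usage sketch (application side): these handlers are reached through the
+ * generic rte_flow flex item calls, e.g.
+ *   handle = rte_flow_flex_item_create(port_id, &conf, &error);
+ *   ...
+ *   ret = rte_flow_flex_item_release(port_id, handle, &error);
+ */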
+
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item)
{
}
return node;
}
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+ { 9, 10, 11 }, { 12, 13, 14 },
+};
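+
+/*
+ * Both maps are indexed as map[priority][subpriority]; e.g., with 16 Verbs
+ * priorities, rule priority 1 and subpriority 2 map to Verbs priority 5
+ * (priority_map_5[1][2]).
+ */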
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ * Ethernet device.
+ *
+ * @return
+ * On success, number of available flow priorities.
+ * On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+ static const uint16_t vprio[] = {8, 16};
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type;
+ int ret;
+
+ type = mlx5_flow_os_get_type();
+ if (type == MLX5_FLOW_TYPE_MAX) {
+ type = MLX5_FLOW_TYPE_VERBS;
+ if (priv->sh->devx && priv->config.dv_flow_en)
+ type = MLX5_FLOW_TYPE_DV;
+ }
+ fops = flow_get_drv_ops(type);
+ if (fops->discover_priorities == NULL) {
+ DRV_LOG(ERR, "Priority discovery not supported");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+ if (ret < 0)
+ return ret;
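+ /*
+ * Map the number of discovered Verbs priorities to the number of flow
+ * priority levels: 8 Verbs priorities yield 3 levels, 16 yield 5
+ * (see the maps above).
+ */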
+ switch (ret) {
+ case 8:
+ ret = RTE_DIM(priority_map_3);
+ break;
+ case 16:
+ ret = RTE_DIM(priority_map_5);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR,
+ "port %u maximum priority: %d expected 8/16",
+ dev->data->port_id, ret);
+ return -rte_errno;
+ }
+ DRV_LOG(INFO, "port %u supported flow priorities:"
+ " 0-%d for ingress or egress root table,"
+ " 0-%d for non-root table or transfer root table.",
+ dev->data->port_id, ret - 2,
+ MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+ return ret;
+}
+
+/**
+ * Adjust flow priority based on the highest layer and the request priority.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+ uint32_t subpriority)
+{
+ uint32_t res = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (priv->sh->flow_max_priority) {
+ case RTE_DIM(priority_map_3):
+ res = priority_map_3[priority][subpriority];
+ break;
+ case RTE_DIM(priority_map_5):
+ res = priority_map_5[priority][subpriority];
+ break;
+ }
+ return res;
+}