const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
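+/* DV is assumed to be the only flow engine when Verbs headers are absent (non-Linux builds). */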
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
return ret;
}
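+/* Maximum number of expanded RSS flow entries; also bounds the expansion stack depth below. */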
+#define MLX5_RSS_EXP_ELT_N 8
+
/**
* Expand RSS flows into several possible flows according to the RSS hash
* fields requested and the driver capabilities.
const struct mlx5_flow_expand_node graph[],
int graph_root_index)
{
- const int elt_n = 8;
const struct rte_flow_item *item;
const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
const int *next_node;
- const int *stack[elt_n];
+ const int *stack[MLX5_RSS_EXP_ELT_N];
int stack_pos = 0;
- struct rte_flow_item flow_items[elt_n];
+ struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
unsigned int i;
size_t lsize;
size_t user_pattern_size = 0;
memset(&missed_item, 0, sizeof(missed_item));
lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
- elt_n * sizeof(buf->entry[0]);
+ MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
if (lsize <= size) {
buf->entry[0].priority = 0;
- buf->entry[0].pattern = (void *)&buf->entry[elt_n];
+ buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
buf->entries = 0;
addr = buf->entry[0].pattern;
}
/* Go deeper. */
if (node->next) {
next_node = node->next;
- if (stack_pos++ == elt_n) {
+ if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
rte_errno = E2BIG;
return -rte_errno;
}
return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
REG_C_3;
case MLX5_MTR_COLOR:
+ case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
case MLX5_COPY_MARK:
start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
(priv->mtr_reg_share ? REG_C_3 : REG_C_4);
skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
- if (id > (REG_C_7 - start_reg))
+ if (id > (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
*/
if (skip_mtr_reg && config->flow_mreg_c
[id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
- if (id >= (REG_C_7 - start_reg))
+ if (id >= (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
- return;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
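+ /*
+  * Resolve the indirection table either from the flow's hash Rx
+  * queue or from the shared RSS action it references.
+  */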
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
dev_handle->rix_hrxq);
- if (!hrxq)
+ if (hrxq)
+ ind_tbl = hrxq->ind_table;
+ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ dev_handle->rix_srss);
+ if (shared_rss)
+ ind_tbl = shared_rss->ind_tbl;
+ }
+ if (!ind_tbl)
return;
- for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
- int idx = hrxq->ind_table->queues[i];
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ int idx = ind_tbl->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
- return;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
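+ /* Resolve the indirection table as above: hash Rx queue or shared RSS action. */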
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
dev_handle->rix_hrxq);
- if (!hrxq)
+ if (hrxq)
+ ind_tbl = hrxq->ind_table;
+ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ dev_handle->rix_srss);
+ if (shared_rss)
+ ind_tbl = shared_rss->ind_tbl;
+ }
+ if (!ind_tbl)
return;
MLX5_ASSERT(dev->data->dev_started);
- for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
- int idx = hrxq->ind_table->queues[i];
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ int idx = ind_tbl->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
+ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
unsigned int i;
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
for (i = 0; i != rss->queue_num; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
if (rss->queue[i] >= priv->rxqs_n)
return rte_flow_error_set
(error, EINVAL,
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue[i], "queue is not configured");
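+ /*
+  * All queues must be of the same type: mixing hairpin and
+  * regular Rx queues in one RSS action is rejected below.
+  */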
+ rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
+ struct mlx5_rxq_ctrl, rxq);
+ if (i == 0)
+ rxq_type = rxq_ctrl->type;
+ if (rxq_type != rxq_ctrl->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "combining hairpin and regular RSS queues is not supported");
}
return 0;
}
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4)))
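+ /* Encapsulation data larger than the eth + IPv4 decision size forces a flow split. */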
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
split++;
action_n++;
break;
flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
uint32_t flow_idx);
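+/*
+ * Hash list match callback for mark copy resources: zero is
+ * returned when the entry mark ID matches the lookup key.
+ */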
+int
+flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+
+ return mcp_res->mark_id != key;
+}
+
struct mlx5_hlist_entry *
flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
void *cb_ctx)
return NULL;
}
mcp_res->idx = idx;
+ mcp_res->mark_id = mark_id;
/*
* The copy Flows are not included in any list. These
* are referenced from other Flows and cannot
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
raw_decap = actions->conf;
- if (raw_decap->size <
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
- union mlx5_flow_tbl_key sfx_table_key;
#endif
size_t act_size;
size_t item_size;
sample_res->normal_path_tbl;
sfx_tbl_data = container_of(sfx_tbl,
struct mlx5_flow_tbl_data_entry, tbl);
- sfx_table_key.v64 = sfx_tbl_data->entry.key;
sfx_attr.group = sfx_attr.transfer ?
- (sfx_table_key.table_id - 1) :
- sfx_table_key.table_id;
+ (sfx_tbl_data->table_id - 1) :
+ sfx_tbl_data->table_id;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark = dev_flow->handle->mark;
}
priv->isolated = !!enable;
if (enable)
- dev->dev_ops = &mlx5_os_dev_ops_isolate;
+ dev->dev_ops = &mlx5_dev_ops_isolate;
else
- dev->dev_ops = &mlx5_os_dev_ops;
+ dev->dev_ops = &mlx5_dev_ops;
dev->rx_descriptor_status = mlx5_rx_descriptor_status;
dev->tx_descriptor_status = mlx5_tx_descriptor_status;
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+ mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
- mlx5_glue->devx_umem_dereg(mem_mng->umem);
+ mlx5_os_umem_dereg(mem_mng->umem);
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
mlx5_free(tte);
}
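+/*
+ * Hash list match callback for tunnel group tables: both the
+ * tunnel ID and the group decoded from the key must match.
+ */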
+static int
+mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
+}
+
static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
- uint64_t key __rte_unused,
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
void *ctx __rte_unused)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct tunnel_tbl_entry *tte;
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
sizeof(*tte), 0,
goto err;
}
tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
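+ /* Keep the key fields in the entry for the match callback. */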
+ tte->tunnel_id = tbl.tunnel_id;
+ tte->group = tbl.group;
return &tte->hash;
err:
if (tte)
DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
dev->data->port_id, tunnel->tunnel_id);
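+ /* Unlink the tunnel from the list before its resources are released. */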
+ LIST_REMOVE(tunnel, chain);
mlx5_hlist_destroy(tunnel->groups);
ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
mlx5_ipool_free(ipool, tunnel->tunnel_id);
}
tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!tunnel->groups) {
mlx5_ipool_free(ipool, id);
rte_spinlock_init(&thub->sl);
thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
0, mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!thub->groups) {
err = -rte_errno;
const struct mlx5_flow_tbl_data_entry *tble;
const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
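+ /* No restore info can exist when tunnel offload is not active. */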
+ if (!is_tunnel_offload_active(dev)) {
+ info->flags = 0;
+ return 0;
+ }
+
if ((ol_flags & mask) != mask)
goto err;
tble = tunnel_mark_decode(dev, m->hash.fdir.hi);