uint8_t affinity_mode; /* TIS or hash based affinity */
};
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+ struct mlx5_list_entry entry; /* List element at the beginning. */
+ uint32_t num_samples; /* Number of configured sample fields. */
+ void *devx_obj; /* DevX flex parser object. */
+ struct mlx5_devx_graph_node_attr devx_conf; /* Parser node configuration. */
+ uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM]; /* Firmware-assigned sample IDs. */
+};
+
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
struct mlx5_list *dest_array_list;
/* List of destination array actions. */
+ struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
void *default_miss_action; /* Default miss action. */
struct rte_flow_error *error);
int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+ struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+ struct mlx5_list_entry *entry,
+ void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *list_ctx,
+ struct mlx5_list_entry *entry);
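+/*
+ * The callbacks above are intended to be plugged into the shared
+ * sh->flex_parsers_dv list. A minimal sketch of the expected wiring,
+ * assuming the mlx5_list_create() prototype from mlx5_common_utils.h;
+ * the list name and the lcores_share flag below are illustrative only:
+ *
+ *	sh->flex_parsers_dv =
+ *		mlx5_list_create("flex_parsers", sh, false,
+ *				 mlx5_flex_parser_create_cb,
+ *				 mlx5_flex_parser_match_cb,
+ *				 mlx5_flex_parser_remove_cb,
+ *				 mlx5_flex_parser_clone_cb,
+ *				 mlx5_flex_parser_clone_free_cb);
+ */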
#endif /* RTE_PMD_MLX5_H_ */
for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
if (priv->flex_item_map & (1 << i)) {
- /* DevX object dereferencing should be provided here. */
+ struct mlx5_flex_item *flex = &priv->flex_item[i];
+
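+ /* Release the flex parser reference taken at item creation. */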
+ claim_zero(mlx5_list_unregister
+ (priv->sh->flex_parsers_dv,
+ &flex->devx_fp->entry));
+ flex->devx_fp = NULL;
+ flex->refcnt = 0;
priv->flex_item_map &= ~(1 << i);
}
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
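+ /* Parser configuration used as the list lookup/creation context. */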
+ struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
struct mlx5_flex_item *flex;
+ struct mlx5_list_entry *ent;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
flex = mlx5_flex_alloc(priv);
"too many flex items created on the port");
return NULL;
}
+ ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+ if (!ent) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flex item creation failure");
+ goto error;
+ }
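+ /* Resolve the parser context embedding the returned list entry. */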
+ flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
RTE_SET_USED(conf);
/* Mark initialized flex item valid. */
__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+ mlx5_flex_free(priv, flex);
+ return NULL;
}
/**
struct mlx5_flex_item *flex =
(struct mlx5_flex_item *)(uintptr_t)handle;
uint32_t old_refcnt = 1;
+ int rc;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
rte_spinlock_lock(&priv->flex_item_sl);
}
/* Flex item is marked as invalid, we can leave locked section. */
rte_spinlock_unlock(&priv->flex_item_sl);
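+ /* Drop the parser reference; the DevX object is destroyed on last release. */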
+ MLX5_ASSERT(flex->devx_fp);
+ rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+ &flex->devx_fp->entry);
+ flex->devx_fp = NULL;
mlx5_flex_free(priv, flex);
+ if (rc < 0)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex item release failure");
return 0;
}
+
+/* DevX flex parser list callbacks. */
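+/* Create callback: build the DevX flex parser from the requested configuration. */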
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list_ctx;
+ struct mlx5_flex_parser_devx *fp, *conf = ctx;
+ int ret;
+
+ fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
+ 0, SOCKET_ID_ANY);
+ if (!fp)
+ return NULL;
+ /* Copy the requested configurations. */
+ fp->num_samples = conf->num_samples;
+ memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+ /* Create DevX flex parser. */
+ fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
+ &fp->devx_conf);
+ if (!fp->devx_obj)
+ goto error;
+ /* Query the firmware assigned sample ids. */
+ ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+ fp->sample_ids,
+ fp->num_samples);
+ if (ret)
+ goto error;
+ DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
+ (const void *)fp, fp->num_samples);
+ return &fp->entry;
+error:
+ if (fp->devx_obj)
+ mlx5_devx_cmd_destroy(fp->devx_obj);
+ mlx5_free(fp);
+ return NULL;
+}
+
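+/* Match callback: compare full parser configurations; zero means match. */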
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+ struct mlx5_list_entry *iter, void *ctx)
+{
+ struct mlx5_flex_parser_devx *fp =
+ container_of(iter, struct mlx5_flex_parser_devx, entry);
+ struct mlx5_flex_parser_devx *org =
+ container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+ RTE_SET_USED(list_ctx);
+ return !iter || !ctx || memcmp(&fp->devx_conf,
+ &org->devx_conf,
+ sizeof(fp->devx_conf));
+}
+
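+/* Remove callback: destroy the DevX parser object and free the context. */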
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_flex_parser_devx *fp =
+ container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+ RTE_SET_USED(list_ctx);
+ MLX5_ASSERT(fp->devx_obj);
+ claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+ DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
+ mlx5_free(fp);
+}
+
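+/* Clone callback: allocate an lcore-local copy of the parser context. */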
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+ struct mlx5_list_entry *entry, void *ctx)
+{
+ struct mlx5_flex_parser_devx *fp;
+
+ RTE_SET_USED(list_ctx);
+ RTE_SET_USED(entry);
+ fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+ 0, SOCKET_ID_ANY);
+ if (!fp)
+ return NULL;
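+ /* Copy the parser context provided by the caller. */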
+ memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+ return &fp->entry;
+}
+
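+/* Clone free callback: release an lcore-local parser context copy. */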
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_flex_parser_devx *fp =
+ container_of(entry, struct mlx5_flex_parser_devx, entry);
+ RTE_SET_USED(list_ctx);
+ mlx5_free(fp);
+}