/* Flex items have been created on the port. */
uint32_t flex_item_map; /* Map of allocated flex item elements. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ /* Item template list. */
+ LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
uint32_t nb_queue; /* HW steering queue number. */
/* HW steering queue polling mechanism job descriptor LIFO. */
const struct rte_flow_queue_attr *queue_attr[],
struct rte_flow_error *err);
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
+
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
.flex_item_release = mlx5_flow_flex_item_release,
.info_get = mlx5_flow_info_get,
.configure = mlx5_flow_port_configure,
+ .pattern_template_create = mlx5_flow_pattern_template_create,
+ .pattern_template_destroy = mlx5_flow_pattern_template_destroy,
};
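/*
 * Illustration only (not part of this patch): how an application reaches
 * the new callbacks through the generic rte_flow API. The port id, the
 * helper name, and the eth/ipv4 pattern below are hypothetical, and
 * rte_flow_configure() is assumed to have already switched the port to
 * HW steering mode. Only relaxed_matching is set, as it is the attribute
 * this patch consumes.
 */
#include <rte_flow.h>

static struct rte_flow_pattern_template *
example_create_eth_ipv4_template(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Routed by ethdev to mlx5_flow_pattern_template_create() below. */
	return rte_flow_pattern_template_create(port_id, &attr, pattern, error);
}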
/* Tunnel information. */
return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
}
+/**
+ * Create flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the item template attributes.
+ * @param[in] items
+ * The template item pattern.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ *   Item template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "pattern create with incorrect steering mode");
+ return NULL;
+ }
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->pattern_template_create(dev, attr, items, error);
+}
+
+/**
+ * Destroy flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ * Pointer to the item template to be destroyed.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "pattern destroy with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->pattern_template_destroy(dev, template, error);
+}
+
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+
+/* Flow item template struct. */
+struct rte_flow_pattern_template {
+ LIST_ENTRY(rte_flow_pattern_template) next;
+ /* Template attributes. */
+ struct rte_flow_pattern_template_attr attr;
+ struct mlx5dr_match_template *mt; /* mlx5 match template. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+#endif
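/*
 * Illustration only: the template list above relies on the BSD queue(3)
 * intrusive-list macros (LIST_HEAD/LIST_ENTRY). A standalone sketch of
 * the same pattern with a hypothetical node type, including the drain
 * loop style that flow_hw_resource_release() uses.
 */
#include <sys/queue.h>
#include <stdlib.h>

struct node {
	int id;
	LIST_ENTRY(node) next;	/* Links embedded in the element itself. */
};

LIST_HEAD(node_list, node);	/* Declares the head type "struct node_list". */

static void
node_list_demo(void)
{
	struct node_list head = LIST_HEAD_INITIALIZER(head);
	struct node *n;
	int i;

	for (i = 0; i < 3; i++) {
		n = calloc(1, sizeof(*n));
		if (n == NULL)
			break;
		n->id = i;
		LIST_INSERT_HEAD(&head, n, next);	/* O(1) push at head. */
	}
	/* Drain in the same style as the release path of this patch. */
	while (!LIST_EMPTY(&head)) {
		n = LIST_FIRST(&head);
		LIST_REMOVE(n, next);
		free(n);
	}
}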
+
/*
* Define list of valid combinations of RX Hash fields
* (see enum ibv_rx_hash_fields).
uint16_t nb_queue,
const struct rte_flow_queue_attr *queue_attr[],
struct rte_flow_error *err);
+typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pattern_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_item_update_t item_update;
mlx5_flow_info_get_t info_get;
mlx5_flow_port_configure_t configure;
+ mlx5_flow_pattern_template_create_t pattern_template_create;
+ mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
};
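/*
 * Illustration only: a minimal sketch of the ops-table dispatch pattern
 * used by mlx5_flow_pattern_template_create()/_destroy() above, with
 * hypothetical names independent of the mlx5 code. A thin wrapper checks
 * the active steering mode, then forwards to the backend's
 * function-pointer table.
 */
#include <errno.h>
#include <stddef.h>

enum drv_type { DRV_TYPE_VERBS, DRV_TYPE_DV, DRV_TYPE_HW };

struct drv_ops {
	int (*do_op)(int arg);
};

static int hw_do_op(int arg) { return arg; }

static const struct drv_ops hw_ops = { .do_op = hw_do_op };

static const struct drv_ops *
get_drv_ops(enum drv_type type)
{
	return type == DRV_TYPE_HW ? &hw_ops : NULL;
}

static int
do_op(enum drv_type active, int arg)
{
	const struct drv_ops *ops = get_drv_ops(active);

	/* Reject the call when the active mode has no such backend op. */
	if (ops == NULL || ops->do_op == NULL)
		return -ENOTSUP;
	return ops->do_op(arg);
}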
/* mlx5_flow.c */
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
/**
+ * Create flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the item template attributes.
+ * @param[in] items
+ * The template item pattern.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Item template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_pattern_template_create(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_pattern_template *it;
+
+ it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
+ if (!it) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate item template");
+ return NULL;
+ }
+ it->attr = *attr;
+ it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
+ if (!it->mt) {
+ mlx5_free(it);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create match template");
+ return NULL;
+ }
+ __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
+ return it;
+}
+
+/**
+ * Destroy flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ * Pointer to the item template to be destroyed.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow_pattern_template *template,
+				 struct rte_flow_error *error)
+{
+ if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ DRV_LOG(WARNING, "Item template %p is still in use.",
+ (void *)template);
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "item template in using");
+ }
+ LIST_REMOVE(template, next);
+ claim_zero(mlx5dr_match_template_destroy(template->mt));
+ mlx5_free(template);
+ return 0;
+}
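/*
 * Illustration only: the refcnt handling above follows the common GCC
 * __atomic reference-count pattern; the creator holds the initial
 * reference and destroy refuses while more than one is outstanding.
 * Generic sketch with a hypothetical object type, not driver code.
 * Relaxed ordering suffices for a plain counter with no dependent data
 * published through it.
 */
#include <stdint.h>

struct refobj {
	uint32_t refcnt;
};

/* Take an extra reference on an already-live object. */
static inline void
refobj_get(struct refobj *obj)
{
	__atomic_fetch_add(&obj->refcnt, 1, __ATOMIC_RELAXED);
}

/* Non-zero when the caller dropped the last reference and may free. */
static inline int
refobj_put(struct refobj *obj)
{
	return __atomic_fetch_sub(&obj->refcnt, 1, __ATOMIC_RELAXED) == 1;
}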
+
+/**
* Get information about HWS pre-configurable resources.
*
* @param[in] dev
flow_hw_resource_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_pattern_template *it;
if (!priv->dr_ctx)
return;
+ while (!LIST_EMPTY(&priv->flow_hw_itt)) {
+ it = LIST_FIRST(&priv->flow_hw_itt);
+ flow_hw_pattern_template_destroy(dev, it, NULL);
+ }
mlx5_free(priv->hw_q);
priv->hw_q = NULL;
claim_zero(mlx5dr_context_close(priv->dr_ctx));
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
+ .pattern_template_create = flow_hw_pattern_template_create,
+ .pattern_template_destroy = flow_hw_pattern_template_destroy,
};
#endif