/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ flow_hw_resource_release(dev);
+#endif
if (priv->rxq_privs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
#include "mlx5_utils.h"
#include "mlx5_os.h"
#include "mlx5_autoconf.h"
-
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+#include "mlx5_dr.h"
+#endif
#define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)
uint16_t refcnt; /* Reference count for representors. */
};
+/* HW steering queue job descriptor type. */
+enum {
+ MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
+ MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
+};
+
+/* HW steering flow management job descriptor. */
+struct mlx5_hw_q_job {
+ uint32_t type; /* Job type. */
+ struct rte_flow *flow; /* Flow attached to the job. */
+ void *user_data; /* Job user data. */
+};
+
+/* HW steering job descriptor LIFO pool. */
+struct mlx5_hw_q {
+ uint32_t job_idx; /* Free job index. */
+ uint32_t size; /* LIFO size. */
+ struct mlx5_hw_q_job **job; /* LIFO header (array of job pointers). */
+} __rte_cache_aligned;
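+/*
+ * Illustrative LIFO usage (sketch only, not part of this patch): a free
+ * job is popped from the tail when an operation is enqueued, e.g.
+ *     job = hw_q->job[--hw_q->job_idx];
+ * and pushed back once the operation completes, e.g.
+ *     hw_q->job[hw_q->job_idx++] = job;
+ */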
+
#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4
#define MLX5_CNT_CONTAINER_RESIZE 64
struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
/* Flex items have been created on the port. */
uint32_t flex_item_map; /* Map of allocated flex item elements. */
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
+ uint32_t nb_queue; /* HW steering queue number. */
+ /* HW steering queue polling mechanism job descriptor LIFO. */
+ struct mlx5_hw_q *hw_q;
+#endif
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
const struct rte_flow_item_flex_handle *handle,
struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
.flex_item_create = mlx5_flow_flex_item_create,
.flex_item_release = mlx5_flow_flex_item_release,
+ .info_get = mlx5_flow_info_get,
+ .configure = mlx5_flow_port_configure,
};
/* Tunnel information. */
if (type != MLX5_FLOW_TYPE_MAX)
return type;
+ /*
+ * Currently, when dv_flow_en == 2, only the HW steering engine is
+ * supported. Other engines can be selected here once they are ready.
+ */
+ if (priv->sh->config.dv_flow_en == 2)
+ return MLX5_FLOW_TYPE_HW;
/* If no OS specific type - continue with DV/VERBS selection */
if (attr->transfer && priv->sh->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
return -ENOTSUP;
}
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ * Pointer to port information.
+ * @param[out] queue_info
+ * Pointer to queue information.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "info get with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ * Port configuration attributes.
+ * @param[in] nb_queue
+ * Number of queues.
+ * @param[in] queue_attr
+ * Array that holds attributes for each flow queue.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port configure with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
+
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
const struct rte_flow_item_flex_handle *handle,
const struct rte_flow_item_flex_conf *conf,
struct rte_flow_error *error);
+typedef int (*mlx5_flow_info_get_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_port_configure_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_item_create_t item_create;
mlx5_flow_item_release_t item_release;
mlx5_flow_item_update_t item_update;
+ mlx5_flow_info_get_t info_get;
+ mlx5_flow_port_configure_t configure;
};
/* mlx5_flow.c */
mlx5_get_tof(const struct rte_flow_item *items,
const struct rte_flow_action *actions,
enum mlx5_tof_rule_type *rule_type);
+void
+flow_hw_resource_release(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
#include <rte_flow.h>
+#include <mlx5_malloc.h>
+#include "mlx5_defs.h"
#include "mlx5_flow.h"
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ * Pointer to port information.
+ * @param[out] queue_info
+ * Pointer to queue information.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error __rte_unused)
+{
+ /* No port limits are reported currently; clear the structure. */
+ memset(port_info, 0, sizeof(*port_info));
+ /* The low-level layer does not limit the queue size. */
+ queue_info->max_size = UINT32_MAX;
+ return 0;
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ * Port configuration attributes.
+ * @param[in] nb_queue
+ * Number of queues.
+ * @param[in] queue_attr
+ * Array that holds attributes for each flow queue.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_context *dr_ctx = NULL;
+ struct mlx5dr_context_attr dr_ctx_attr = {0};
+ struct mlx5_hw_q *hw_q;
+ struct mlx5_hw_q_job *job = NULL;
+ uint32_t mem_size, i, j;
+
+ if (!port_attr || !nb_queue || !queue_attr) {
+ rte_errno = EINVAL;
+ goto err;
+ }
+ /* In case of re-configuration, release the existing context first. */
+ if (priv->dr_ctx) {
+ /* Iterate over the queues of the existing configuration. */
+ for (i = 0; i < priv->nb_queue; i++) {
+ hw_q = &priv->hw_q[i];
+ /* Make sure all queues are empty. */
+ if (hw_q->size != hw_q->job_idx) {
+ rte_errno = EBUSY;
+ goto err;
+ }
+ }
+ flow_hw_resource_release(dev);
+ }
+ /* Allocate the queue job descriptor LIFO. */
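+ /*
+ * All queues share a single allocation: the array of mlx5_hw_q headers
+ * comes first, followed by each queue's job pointer array and its
+ * job descriptors.
+ */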
+ mem_size = sizeof(priv->hw_q[0]) * nb_queue;
+ for (i = 0; i < nb_queue; i++) {
+ /*
+ * Check that all queue sizes are identical, as required by
+ * the HWS layer.
+ */
+ if (queue_attr[i]->size != queue_attr[0]->size) {
+ rte_errno = EINVAL;
+ goto err;
+ }
+ mem_size += (sizeof(struct mlx5_hw_q_job *) +
+ sizeof(struct mlx5_hw_q_job)) *
+ queue_attr[0]->size;
+ }
+ priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
+ 64, SOCKET_ID_ANY);
+ if (!priv->hw_q) {
+ rte_errno = ENOMEM;
+ goto err;
+ }
+ for (i = 0; i < nb_queue; i++) {
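+ /*
+ * Carve this queue's job pointer array and job descriptors out of
+ * the shared allocation: queue 0 starts right after the mlx5_hw_q
+ * headers, each following queue starts after the previous queue's
+ * job descriptors. The LIFO starts full: all jobs are free.
+ */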
+ priv->hw_q[i].job_idx = queue_attr[i]->size;
+ priv->hw_q[i].size = queue_attr[i]->size;
+ if (i == 0)
+ priv->hw_q[i].job = (struct mlx5_hw_q_job **)
+ &priv->hw_q[nb_queue];
+ else
+ priv->hw_q[i].job = (struct mlx5_hw_q_job **)
+ &job[queue_attr[i - 1]->size];
+ job = (struct mlx5_hw_q_job *)
+ &priv->hw_q[i].job[queue_attr[i]->size];
+ for (j = 0; j < queue_attr[i]->size; j++)
+ priv->hw_q[i].job[j] = &job[j];
+ }
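+ /* Use the protection domain of the common device for the HWS context. */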
+ dr_ctx_attr.pd = priv->sh->cdev->pd;
+ dr_ctx_attr.queues = nb_queue;
+ /* Queue sizes must all be the same; take the first one. */
+ dr_ctx_attr.queue_size = queue_attr[0]->size;
+ dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
+ /* rte_errno has been updated by HWS layer. */
+ if (!dr_ctx)
+ goto err;
+ priv->dr_ctx = dr_ctx;
+ priv->nb_queue = nb_queue;
+ return 0;
+err:
+ if (dr_ctx)
+ claim_zero(mlx5dr_context_close(dr_ctx));
+ /* Keep the existing job LIFO if a configured context is still active. */
+ if (!priv->dr_ctx) {
+ mlx5_free(priv->hw_q);
+ priv->hw_q = NULL;
+ }
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "failed to configure port");
+}
+
+/**
+ * Release HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ */
+void
+flow_hw_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->dr_ctx)
+ return;
+ mlx5_free(priv->hw_q);
+ priv->hw_q = NULL;
+ claim_zero(mlx5dr_context_close(priv->dr_ctx));
+ priv->dr_ctx = NULL;
+ priv->nb_queue = 0;
+}
+
+const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
+ .info_get = flow_hw_info_get,
+ .configure = flow_hw_configure,
+};
+
#endif