[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+ [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
#endif
[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
const struct rte_flow_item_flex_handle *handle,
struct rte_flow_error *error);
+/*
+ * Forward declarations for the HW steering (HWS) rte_flow entry points
+ * wired into mlx5_flow_ops below; definitions follow later in this file.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
.flex_item_create = mlx5_flow_flex_item_create,
.flex_item_release = mlx5_flow_flex_item_release,
+ /* HWS pre-configuration entry points (effective only with HW steering). */
+ .info_get = mlx5_flow_info_get,
+ .configure = mlx5_flow_port_configure,
};
/* Tunnel information. */
if (type != MLX5_FLOW_TYPE_MAX)
return type;
+ /*
+ * Currently when dv_flow_en == 2, only HW steering engine is
+ * supported. New engines can also be chosen here if ready.
+ */
+ if (priv->sh->config.dv_flow_en == 2)
+ return MLX5_FLOW_TYPE_HW;
/* If no OS specific type - continue with DV/VERBS selection */
if (attr->transfer && priv->sh->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
uint32_t tag_id = 0;
struct rte_flow_item *vlan_item_dst = NULL;
const struct rte_flow_item *vlan_item_src = NULL;
+ const struct rte_flow_item *orig_items = items;
struct rte_flow_action *hw_mtr_action;
struct rte_flow_action *action_pre_head = NULL;
int32_t flow_src_port = priv->representor_id;
if (!fm->def_policy) {
sub_policy = get_meter_sub_policy(dev, flow, wks,
- attr, items, error);
+ attr, orig_items,
+ error);
if (!sub_policy)
return -rte_errno;
} else {
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q creation not supported");
+ return NULL;
+ }
/*
* If the device is not started yet, it is not allowed to created a
* flow from application. PMD default flows and traffic control flows
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q destruction not supported");
flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
(uintptr_t)(void *)flow);
return 0;
struct rte_flow_error *error)
{
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q query not supported");
ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
error);
if (ret < 0)
*/
int
mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
- bool clear, uint64_t *pkts, uint64_t *bytes)
+ bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
{
const struct mlx5_flow_driver_ops *fops;
struct rte_flow_attr attr = { .transfer = 0 };
if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->counter_query(dev, cnt, clear, pkts, bytes);
+ return fops->counter_query(dev, cnt, clear, pkts,
+ bytes, action);
}
DRV_LOG(ERR,
"port %u counter query is not supported.",
return -ENOTSUP;
}
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ * Pointer to port information.
+ * @param[out] queue_info
+ * Pointer to queue information.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ /*
+ * NULL attr: with dv_flow_en == 2 the driver type resolves to
+ * MLX5_FLOW_TYPE_HW before any attribute is inspected — NOTE(review):
+ * confirm flow_get_drv_type() tolerates a NULL attr on all paths.
+ */
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "info get with incorrect steering mode");
+ /* Delegate to the HW steering driver implementation. */
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ * Port configuration attributes.
+ * @param[in] nb_queue
+ * Number of queues.
+ * @param[in] queue_attr
+ * Array that holds attributes for each flow queue.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ /* Pre-configuration exists only for the HW steering engine. */
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port configure with incorrect steering mode");
+ /* Delegate to the HW steering driver implementation. */
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
+
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
"invalid flow handle");
}
handle_idx = flow->dev_handles;
+ /* query counter */
+ if (flow->counter &&
+ (!mlx5_counter_query(dev, flow->counter, false,
+ &count.hits, &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+ }
+
while (handle_idx) {
dh = mlx5_ipool_get(priv->sh->ipool
[MLX5_IPOOL_MLX5_FLOW], handle_idx);
continue;
handle_idx = dh->next.next;
- /* query counter */
- type = DR_DUMP_REC_TYPE_PMD_COUNTER;
- flow_dv_query_count_ptr(dev, flow->counter,
- &action, error);
- if (action) {
- id = (uint64_t)(uintptr_t)action;
- if (!mlx5_flow_query_counter(dev, flow, &count, error))
- save_dump_file(NULL, 0, type,
- id, (void *)&count, file);
- }
/* Get modify_hdr and encap_decap buf from ipools. */
encap_decap = NULL;
modify_hdr = dh->dvh.modify_hdr;
*/
static int
mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
- FILE *file, struct rte_flow_error *error)
+ FILE *file, struct rte_flow_error *error __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
for (j = 1; j <= max; j++) {
action = NULL;
- flow_dv_query_count_ptr(dev, j, &action, error);
- if (action) {
- if (!flow_dv_query_count(dev, j, &count, error)) {
- type = DR_DUMP_REC_TYPE_PMD_COUNTER;
- id = (uint64_t)(uintptr_t)action;
- save_dump_file(NULL, 0, type,
- id, (void *)&count, file);
- }
+ if ((!mlx5_counter_query(dev, j, false, &count.hits,
+ &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
}
}
return 0;