err = mlx5_alloc_shared_dr(priv);
if (err)
goto error;
+ if (mlx5_flex_item_port_init(eth_dev) < 0)
+ goto error;
}
if (sh->devx && config->dv_flow_en && config->dest_tir) {
priv->obj_ops = devx_obj_ops;
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
+ if (eth_dev && priv->flex_item_map)
+ mlx5_flex_item_port_cleanup(eth_dev);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
'mlx5_flow_meter.c',
'mlx5_flow_dv.c',
'mlx5_flow_aso.c',
+ 'mlx5_flow_flex.c',
'mlx5_mac.c',
'mlx5_rss.c',
'mlx5_rx.c',
},
};
-
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
mlx5_mp_os_req_stop_rxtx(dev);
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
+ mlx5_flex_item_port_cleanup(dev);
if (priv->rxqs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
#define MLX5_MAX_MODIFY_NUM 32
#define MLX5_ROOT_TBL_MODIFY_NUM 16
+/* Maximal number of flex items created on the port. */
+#define MLX5_PORT_FLEX_ITEM_NUM 4
+
enum mlx5_ipool_index {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
uint8_t affinity_mode; /* TIS or hash based affinity */
};
+/* Port flex item context. */
+struct mlx5_flex_item {
+ struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
+ uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+};
+
/*
* Shared Infiniband device context for Master/Representors
* which belong to same IB device with multiple IB ports.
struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting. */
+ rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
+ struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
+ /* Flex items created on the port. */
+ uint32_t flex_item_map; /* Map of allocated flex item elements. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
uint32_t
mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
+/* mlx5_flow_flex.c */
+
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+int flow_dv_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *flex_handle,
+ struct rte_flow_error *error);
+int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
+void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_H_ */
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
.tunnel_item_release = mlx5_flow_tunnel_item_release,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+ .flex_item_create = mlx5_flow_flex_item_create,
+ .flex_item_release = mlx5_flow_flex_item_release,
};
/* Tunnel information. */
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item creation unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_create) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, err_msg);
+ return NULL;
+ }
+ return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item release unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_release) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->item_release(dev, handle, error);
+}
+
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item)
{
typedef int (*mlx5_flow_discover_priorities_t)
(struct rte_eth_dev *dev,
const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_action_query_t action_query;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
+ mlx5_flow_item_create_t item_create;
+ mlx5_flow_item_release_t item_release;
+ mlx5_flow_item_update_t item_update;
};
/* mlx5_flow.c */
mlx5_get_tof(const struct rte_flow_item *items,
const struct rte_flow_action *actions,
enum mlx5_tof_rule_type *rule_type);
-
-
#endif /* RTE_PMD_MLX5_FLOW_H_ */
.action_query = flow_dv_action_query,
.sync_domain = flow_dv_sync_domain,
.discover_priorities = flow_dv_discover_priorities,
+ .item_create = flow_dv_item_create,
+ .item_release = flow_dv_item_release,
};
-
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 NVIDIA Corporation & Affiliates
+ */
+#include <rte_malloc.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
+ "Flex item maximal number exceeds uint32_t bit width");
+
+/**
+ * Routine called once on port initialization to initialize
+ * the flex item related infrastructure.
+ *
+ * @param dev
+ * Ethernet device to perform flex item initialization
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_item_port_init(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ rte_spinlock_init(&priv->flex_item_sl);
+ MLX5_ASSERT(!priv->flex_item_map);
+ return 0;
+}
+
+/**
+ * Routine called once on port close to perform flex item
+ * related infrastructure cleanup.
+ *
+ * @param dev
+ * Ethernet device to perform cleanup
+ */
+void
+mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
+ if (priv->flex_item_map & (1 << i)) {
+ /* DevX object dereferencing should be provided here. */
+ priv->flex_item_map &= ~(1 << i);
+ }
+ }
+}
+
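+/*
+ * Map a flex item pointer back to its index in the per-port flex_item[]
+ * array, checking bounds, entry alignment and that the slot is allocated.
+ * Return the index on success, -1 on an invalid handle.
+ */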
+static int
+mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+ uintptr_t start = (uintptr_t)&priv->flex_item[0];
+ uintptr_t entry = (uintptr_t)item;
+ uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
+
+ if (entry < start ||
+ idx >= MLX5_PORT_FLEX_ITEM_NUM ||
+ (entry - start) % sizeof(struct mlx5_flex_item) ||
+ !(priv->flex_item_map & (1u << idx)))
+ return -1;
+ return (int)idx;
+}
+
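+/*
+ * Acquire a free flex item slot: take the lowest clear bit of flex_item_map
+ * under the spinlock, reset the entry and mark it allocated.
+ * Return NULL when all MLX5_PORT_FLEX_ITEM_NUM slots are in use.
+ */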
+static struct mlx5_flex_item *
+mlx5_flex_alloc(struct mlx5_priv *priv)
+{
+ struct mlx5_flex_item *item = NULL;
+
+ rte_spinlock_lock(&priv->flex_item_sl);
+ if (~priv->flex_item_map) {
+ uint32_t idx = rte_bsf32(~priv->flex_item_map);
+
+ if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
+ item = &priv->flex_item[idx];
+ MLX5_ASSERT(!item->refcnt);
+ MLX5_ASSERT(!item->devx_fp);
+ item->devx_fp = NULL;
+ __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ priv->flex_item_map |= 1u << idx;
+ }
+ }
+ rte_spinlock_unlock(&priv->flex_item_sl);
+ return item;
+}
+
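+/*
+ * Return a flex item slot to the free pool: clear the entry and its
+ * allocation bit in flex_item_map under the spinlock.
+ */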
+static void
+mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+ int idx = mlx5_flex_index(priv, item);
+
+ MLX5_ASSERT(idx >= 0 &&
+ idx < MLX5_PORT_FLEX_ITEM_NUM &&
+ (priv->flex_item_map & (1u << idx)));
+ if (idx >= 0) {
+ rte_spinlock_lock(&priv->flex_item_sl);
+ MLX5_ASSERT(!item->refcnt);
+ MLX5_ASSERT(!item->devx_fp);
+ item->devx_fp = NULL;
+ __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ priv->flex_item_map &= ~(1u << idx);
+ rte_spinlock_unlock(&priv->flex_item_sl);
+ }
+}
+
+/**
+ * Create the flex item with specified configuration over the Ethernet device.
+ *
+ * @param dev
+ * Ethernet device to create flex item on.
+ * @param[in] conf
+ * Flex item configuration.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_item *flex;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ flex = mlx5_flex_alloc(priv);
+ if (!flex) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "too many flex items created on the port");
+ return NULL;
+ }
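+ /* The configuration is not translated here; only slot allocation and refcounting are done. */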
+ RTE_SET_USED(conf);
+ /* Mark initialized flex item valid. */
+ __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ return (struct rte_flow_item_flex_handle *)flex;
+}
+
+/**
+ * Release the flex item on the specified Ethernet device.
+ *
+ * @param dev
+ * Ethernet device to destroy flex item on.
+ * @param[in] handle
+ * Handle of the item existing on the specified device.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_item *flex =
+ (struct mlx5_flex_item *)(uintptr_t)handle;
+ uint32_t old_refcnt = 1;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ rte_spinlock_lock(&priv->flex_item_sl);
+ if (mlx5_flex_index(priv, flex) < 0) {
+ rte_spinlock_unlock(&priv->flex_item_sl);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid flex item handle value");
+ }
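+ /* Only a refcnt of exactly 1 (no flow references) can be dropped to 0. */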
+ if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ rte_spinlock_unlock(&priv->flex_item_sl);
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex item has flow references");
+ }
+ /* Flex item is marked as invalid, we can leave locked section. */
+ rte_spinlock_unlock(&priv->flex_item_sl);
+ mlx5_flex_free(priv, flex);
+ return 0;
+}