net/mlx5: add flex item operations
author		Viacheslav Ovsiienko <viacheslavo@nvidia.com>
		Tue, 2 Nov 2021 08:53:42 +0000 (10:53 +0200)
committer	Raslan Darawsheh <rasland@nvidia.com>
		Thu, 4 Nov 2021 21:55:38 +0000 (22:55 +0100)
This patch is a preparation step for implementing the
flex item feature in the driver and provides:

  - external entry point routines for flex item
    creation/deletion

  - management of flex item objects over the ports.

The flex item object keeps information about
the item created over the port: a reference
counter to track whether the item is in use by
active flows, and a pointer to the underlying
shared DevX object, which provides all the data
needed to translate the flow flex pattern into
matcher fields according to the hardware
configuration.

Not many flex items are expected to be created
on a port, so the design is optimized for flow
insertion rate rather than for memory savings.
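
As a usage sketch (assuming the generic ethdev flex item
API introduced by the companion rte_flow series; error
handling trimmed), an application creates the item once
and then references it in flow patterns:

    #include <rte_flow.h>

    /* Sketch only: the 'conf' layout depends on the protocol
     * header the application wants the NIC to parse. */
    struct rte_flow_error error;
    struct rte_flow_item_flex_conf conf = { 0 };
    struct rte_flow_item_flex_handle *handle;

    handle = rte_flow_flex_item_create(port_id, &conf, &error);
    if (handle == NULL)
            printf("flex item: %s\n", error.message);
    /* ... use RTE_FLOW_ITEM_TYPE_FLEX with 'handle' ... */
    rte_flow_flex_item_release(port_id, handle, &error);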

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/meson.build
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_flex.c [new file with mode: 0644]

index dd4fc0c716519dad6a283bd1616b6abf4e108a3e..b41bcea11fb481ad1d6e9c800683a763d8d31c8e 100644 (file)
@@ -1687,6 +1687,8 @@ err_secondary:
                err = mlx5_alloc_shared_dr(priv);
                if (err)
                        goto error;
+               if (mlx5_flex_item_port_init(eth_dev) < 0)
+                       goto error;
        }
        if (sh->devx && config->dv_flow_en && config->dest_tir) {
                priv->obj_ops = devx_obj_ops;
@@ -1819,6 +1821,8 @@ error:
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                if (priv->hrxqs)
                        mlx5_list_destroy(priv->hrxqs);
+               if (eth_dev && priv->flex_item_map)
+                       mlx5_flex_item_port_cleanup(eth_dev);
                mlx5_free(priv);
                if (eth_dev != NULL)
                        eth_dev->data->dev_private = NULL;
index 636a1be890228d2dee1030a0a429358f5b57e5dc..2f6d8cbb3d3ae00492c9a5398129d174e2c3ed9e 100644 (file)
@@ -17,6 +17,7 @@ sources = files(
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
+        'mlx5_flow_flex.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
index cd7bb3f27c4626bd34bb8855f2c789690ecda52c..a4a0e258a96af5ebbdcd78d43d717ec7b91dd364 100644 (file)
@@ -378,7 +378,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
        },
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -1683,6 +1682,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        mlx5_mp_os_req_stop_rxtx(dev);
        /* Free the eCPRI flex parser resource. */
        mlx5_flex_parser_ecpri_release(dev);
+       mlx5_flex_item_port_cleanup(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                rte_delay_us_sleep(1000);
index 912c4a183bc2c39df80ceeb6371b98efd1c83076..f0c1775f8c71503f2e136cb76e34ec3201046094 100644 (file)
@@ -50,6 +50,9 @@
 #define MLX5_MAX_MODIFY_NUM                    32
 #define MLX5_ROOT_TBL_MODIFY_NUM               16
 
+/* Maximal number of flex items created on the port. */
+#define MLX5_PORT_FLEX_ITEM_NUM                        4
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1096,6 +1099,12 @@ struct mlx5_lag {
        uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* Port flex item context. */
+struct mlx5_flex_item {
+       struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
+       uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -1425,6 +1434,10 @@ struct mlx5_priv {
        struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
        uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
        uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting. */
+       rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
+       struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
+       /* Flex items created on the port. */
+       uint32_t flex_item_map; /* Map of allocated flex item elements. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -1804,4 +1817,15 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
 uint32_t
 mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
 
+/* mlx5_flow_flex.c */
+
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+                   const struct rte_flow_item_flex_conf *conf,
+                   struct rte_flow_error *error);
+int flow_dv_item_release(struct rte_eth_dev *dev,
+                   const struct rte_flow_item_flex_handle *flex_handle,
+                   struct rte_flow_error *error);
+int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
+void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 #endif /* RTE_PMD_MLX5_H_ */
index 9904bc5863d10fe628c3fd7821f15325933bb80b..5435660a2dd78fef111e8be3673642618589d3af 100644 (file)
@@ -748,6 +748,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -767,6 +775,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
        .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
+       .flex_item_create = mlx5_flow_flex_item_create,
+       .flex_item_release = mlx5_flow_flex_item_release,
 };
 
 /* Tunnel information. */
@@ -9788,6 +9798,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item creation unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       if (!fops->item_create) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return NULL;
+       }
+       return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item release unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       if (!fops->item_release) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return -rte_errno;
+       }
+       return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
index 8fbc37feb7e519eedc8bc2630fc0f35560251192..458634dab39cbdc877e5e4aee076fb44ceed7cba 100644 (file)
@@ -1235,6 +1235,19 @@ typedef void (*mlx5_flow_destroy_def_policy_t)
 typedef int (*mlx5_flow_discover_priorities_t)
                        (struct rte_eth_dev *dev,
                         const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+                       (struct rte_eth_dev *dev,
+                        const struct rte_flow_item_flex_conf *conf,
+                        struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+                       (struct rte_eth_dev *dev,
+                        const struct rte_flow_item_flex_handle *handle,
+                        struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+                       (struct rte_eth_dev *dev,
+                        const struct rte_flow_item_flex_handle *handle,
+                        const struct rte_flow_item_flex_conf *conf,
+                        struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
        mlx5_flow_validate_t validate;
@@ -1270,6 +1283,9 @@ struct mlx5_flow_driver_ops {
        mlx5_flow_action_query_t action_query;
        mlx5_flow_sync_domain_t sync_domain;
        mlx5_flow_discover_priorities_t discover_priorities;
+       mlx5_flow_item_create_t item_create;
+       mlx5_flow_item_release_t item_release;
+       mlx5_flow_item_update_t item_update;
 };
 
 /* mlx5_flow.c */
@@ -1728,6 +1744,4 @@ const struct mlx5_flow_tunnel *
 mlx5_get_tof(const struct rte_flow_item *items,
             const struct rte_flow_action *actions,
             enum mlx5_tof_rule_type *rule_type);
-
-
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
index 1ca9739d7e8d62b5a9363a469faa4ed286868361..38110cc0d61a8ea6df14751b286660c44728cfeb 100644 (file)
@@ -18068,7 +18068,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
        .discover_priorities = flow_dv_discover_priorities,
+       .item_create = flow_dv_item_create,
+       .item_release = flow_dv_item_release,
 };
-
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
new file mode 100644 (file)
index 0000000..b7bc4af
--- /dev/null
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 NVIDIA Corporation & Affiliates
+ */
+#include <rte_malloc.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
+             "Flex item maximal number exceeds uint32_t bit width");
+
+/**
+ *  Routine called once on port initialization to initialize
+ *  flex item related infrastructure.
+ *
+ * @param dev
+ *   Ethernet device to perform flex item initialization
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_item_port_init(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       rte_spinlock_init(&priv->flex_item_sl);
+       MLX5_ASSERT(!priv->flex_item_map);
+       return 0;
+}
+
+/**
+ *  Routine called once on port close to perform flex item
+ *  related infrastructure cleanup.
+ *
+ * @param dev
+ *   Ethernet device to perform cleanup
+ */
+void
+mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t i;
+
+       for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
+               if (priv->flex_item_map & (1 << i)) {
+                       /* DevX object dereferencing should be provided here. */
+                       priv->flex_item_map &= ~(1 << i);
+               }
+       }
+}
+
+static int
+mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+       uintptr_t start = (uintptr_t)&priv->flex_item[0];
+       uintptr_t entry = (uintptr_t)item;
+       uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
+
+       if (entry < start ||
+           idx >= MLX5_PORT_FLEX_ITEM_NUM ||
+           (entry - start) % sizeof(struct mlx5_flex_item) ||
+           !(priv->flex_item_map & (1u << idx)))
+               return -1;
+       return (int)idx;
+}
+
+static struct mlx5_flex_item *
+mlx5_flex_alloc(struct mlx5_priv *priv)
+{
+       struct mlx5_flex_item *item = NULL;
+
+       rte_spinlock_lock(&priv->flex_item_sl);
+       if (~priv->flex_item_map) {
+               uint32_t idx = rte_bsf32(~priv->flex_item_map);
+
+               if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
+                       item = &priv->flex_item[idx];
+                       MLX5_ASSERT(!item->refcnt);
+                       MLX5_ASSERT(!item->devx_fp);
+                       item->devx_fp = NULL;
+                       __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+                       priv->flex_item_map |= 1u << idx;
+               }
+       }
+       rte_spinlock_unlock(&priv->flex_item_sl);
+       return item;
+}
+
+static void
+mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+       int idx = mlx5_flex_index(priv, item);
+
+       MLX5_ASSERT(idx >= 0 &&
+                   idx < MLX5_PORT_FLEX_ITEM_NUM &&
+                   (priv->flex_item_map & (1u << idx)));
+       if (idx >= 0) {
+               rte_spinlock_lock(&priv->flex_item_sl);
+               MLX5_ASSERT(!item->refcnt);
+               MLX5_ASSERT(!item->devx_fp);
+               item->devx_fp = NULL;
+               __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+               priv->flex_item_map &= ~(1u << idx);
+               rte_spinlock_unlock(&priv->flex_item_sl);
+       }
+}
+
+/**
+ * Create the flex item with specified configuration over the Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to create flex item on.
+ * @param[in] conf
+ *   Flex item configuration.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+                   const struct rte_flow_item_flex_conf *conf,
+                   struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flex_item *flex;
+
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       flex = mlx5_flex_alloc(priv);
+       if (!flex) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "too many flex items created on the port");
+               return NULL;
+       }
+       RTE_SET_USED(conf);
+       /* Mark initialized flex item valid. */
+       __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+       return (struct rte_flow_item_flex_handle *)flex;
+}
+
+/**
+ * Release the flex item on the specified Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to destroy flex item on.
+ * @param[in] handle
+ *   Handle of the item existing on the specified device.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_item_release(struct rte_eth_dev *dev,
+                    const struct rte_flow_item_flex_handle *handle,
+                    struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flex_item *flex =
+               (struct mlx5_flex_item *)(uintptr_t)handle;
+       uint32_t old_refcnt = 1;
+
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       rte_spinlock_lock(&priv->flex_item_sl);
+       if (mlx5_flex_index(priv, flex) < 0) {
+               rte_spinlock_unlock(&priv->flex_item_sl);
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "invalid flex item handle value");
+       }
+       if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
+                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+               rte_spinlock_unlock(&priv->flex_item_sl);
+               return rte_flow_error_set(error, EBUSY,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex item has flow references");
+       }
+       /* Flex item is marked as invalid, we can leave locked section. */
+       rte_spinlock_unlock(&priv->flex_item_sl);
+       mlx5_flex_free(priv, flex);
+       return 0;
+}
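
For clarity, the release path above implements a simple
validity protocol: creation stores 1 in the reference
counter (the "valid" mark), and release only succeeds when
the compare-and-swap still observes exactly 1. A hedged
sketch of the flow-side acquire/release helpers that later
patches in the series are expected to add (names here are
hypothetical):

    /* Hypothetical helpers, not part of this patch: each
     * flow translating a flex pattern holds one extra
     * reference, so the CAS 1 -> 0 in flow_dv_item_release()
     * fails with EBUSY while any flow still references the
     * item. */
    static void
    mlx5_flex_item_acquire(struct mlx5_flex_item *flex)
    {
            __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
    }

    static void
    mlx5_flex_item_unref(struct mlx5_flex_item *flex)
    {
            __atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
    }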