]> git.droids-corp.org - dpdk.git/commitdiff
vdpa/mlx5: support virtio queue statistics get
author Matan Azrad <matan@mellanox.com>
Thu, 18 Jun 2020 18:59:43 +0000 (18:59 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 30 Jun 2020 12:52:29 +0000 (14:52 +0200)
Add support for statistics operations.

A DevX counter object is allocated per virtq in order to
manage the virtq statistics.

The counter object is allocated before the virtq creation
and destroyed after it, so the statistics are valid only
during the lifetime of the virtq.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
doc/guides/rel_notes/release_20_08.rst
doc/guides/vdpadevs/features/mlx5.ini
drivers/vdpa/mlx5/mlx5_vdpa.c
drivers/vdpa/mlx5/mlx5_vdpa.h
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c

index 5f23c9578494d10e73a3acb46a4ddef87033edf0..97d8db16352c1f73c9f9f82f336fd54acf0b2282 100644 (file)
@@ -73,6 +73,12 @@ New Features
      A new 3 APIs has been added to query virtio queue statistics, to get their
      names and to reset them by a vDPA device.
 
+* **Updated Mellanox mlx5 vDPA driver.**
+
+  Updated Mellanox mlx5 vDPA driver with new features, including:
+
+  * Added support for virtio queue statistics.
+
 * **Added support for BPF_ABS/BPF_IND load instructions.**
 
   Added support for two BPF non-generic instructions:
index 1da9c1bf7ea1ede0398ef1fb47ce2f429dfa4aa3..788d4e0b190ffa300b1bbd3aec120637b0c4ef12 100644 (file)
@@ -17,6 +17,7 @@ packed               = Y
 proto mq             = Y
 proto log shmfd      = Y
 proto host notifier  = Y
+queue statistics     = Y
 Other kdrv           = Y
 ARMv8                = Y
 Power8               = Y
index 1113d6cef027f6e3885ee76f721f3cdbd886ce86..a80e3f4aed6a0de514b0031bf3a3466b1d049922 100644 (file)
@@ -8,6 +8,7 @@
 #include <rte_errno.h>
 #include <rte_bus_pci.h>
 #include <rte_pci.h>
+#include <rte_string_fns.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_common.h>
@@ -274,6 +275,85 @@ mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
        return 0;
 }
 
+static int
+mlx5_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
+                         unsigned int size)
+{
+       static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
+               "received_descriptors",
+               "completed_descriptors",
+               "bad descriptor errors",
+               "exceed max chain",
+               "invalid buffer",
+               "completion errors",
+       };
+       struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+       unsigned int i;
+
+       if (priv == NULL) {
+               DRV_LOG(ERR, "Invalid device id: %d.", did);
+               return -ENODEV;
+       }
+       if (!stats_names)
+               return MLX5_VDPA_STATS_MAX;
+       size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
+       for (i = 0; i < size; ++i)
+               strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
+                       RTE_VDPA_STATS_NAME_SIZE);
+       return size;
+}
+
+static int
+mlx5_vdpa_get_stats(int did, int qid, struct rte_vdpa_stat *stats,
+                   unsigned int n)
+{
+       struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+       if (priv == NULL) {
+               DRV_LOG(ERR, "Invalid device id: %d.", did);
+               return -ENODEV;
+       }
+       if (!priv->configured) {
+               DRV_LOG(ERR, "Device %d was not configured.", did);
+               return -ENODATA;
+       }
+       if (qid >= (int)priv->nr_virtqs) {
+               DRV_LOG(ERR, "Too big vring id: %d.", qid);
+               return -E2BIG;
+       }
+       if (!priv->caps.queue_counters_valid) {
+               DRV_LOG(ERR, "Virtq statistics is not supported for device %d.",
+                       did);
+               return -ENOTSUP;
+       }
+       return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
+}
+
+static int
+mlx5_vdpa_reset_stats(int did, int qid)
+{
+       struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+       if (priv == NULL) {
+               DRV_LOG(ERR, "Invalid device id: %d.", did);
+               return -ENODEV;
+       }
+       if (!priv->configured) {
+               DRV_LOG(ERR, "Device %d was not configured.", did);
+               return -ENODATA;
+       }
+       if (qid >= (int)priv->nr_virtqs) {
+               DRV_LOG(ERR, "Too big vring id: %d.", qid);
+               return -E2BIG;
+       }
+       if (!priv->caps.queue_counters_valid) {
+               DRV_LOG(ERR, "Virtq statistics is not supported for device %d.",
+                       did);
+               return -ENOTSUP;
+       }
+       return mlx5_vdpa_virtq_stats_reset(priv, qid);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .get_queue_num = mlx5_vdpa_get_queue_num,
        .get_features = mlx5_vdpa_get_vdpa_features,
@@ -286,6 +366,9 @@ static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .get_vfio_group_fd = NULL,
        .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
        .get_notify_area = mlx5_vdpa_get_notify_area,
+       .get_stats_names = mlx5_vdpa_get_stats_names,
+       .get_stats = mlx5_vdpa_get_stats,
+       .reset_stats = mlx5_vdpa_reset_stats,
 };
 
 static struct ibv_device *
@@ -489,6 +572,8 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                rte_errno = ENOTSUP;
                goto error;
        }
+       if (!attr.vdpa.queue_counters_valid)
+               DRV_LOG(DEBUG, "No capability to support virtq statistics.");
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
                           sizeof(struct mlx5_vdpa_virtq) *
                           attr.vdpa.max_num_virtio_queues * 2,
index fcc216ac78d0e1fc28f1577f190d8f0deb5e146c..80b4c4bda9075239627edd745e14d93a2be88087 100644 (file)
@@ -76,6 +76,7 @@ struct mlx5_vdpa_virtq {
        uint16_t vq_size;
        struct mlx5_vdpa_priv *priv;
        struct mlx5_devx_obj *virtq;
+       struct mlx5_devx_obj *counters;
        struct mlx5_vdpa_event_qp eqp;
        struct {
                struct mlx5dv_devx_umem *obj;
@@ -83,6 +84,7 @@ struct mlx5_vdpa_virtq {
                uint32_t size;
        } umems[3];
        struct rte_intr_handle intr_handle;
+       struct mlx5_devx_virtio_q_couners_attr reset;
 };
 
 struct mlx5_vdpa_steer {
@@ -127,6 +129,16 @@ struct mlx5_vdpa_priv {
        struct mlx5_vdpa_virtq virtqs[];
 };
 
+enum {
+       MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+       MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+       MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+       MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+       MLX5_VDPA_STATS_INVALID_BUFFER,
+       MLX5_VDPA_STATS_COMPLETION_ERRORS,
+       MLX5_VDPA_STATS_MAX
+};
+
 /*
  * Check whether virtq is for traffic receive.
  * According to VIRTIO_NET Spec the virtqueues index identity its type by:
@@ -352,4 +364,37 @@ int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
  */
 int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
 
+/**
+ * Get virtq statistics.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] qid
+ *   The virtq index.
+ * @param stats
+ *   The virtq statistics array to fill.
+ * @param n
+ *   The number of elements in @p stats array.
+ *
+ * @return
+ *   A negative value on error, otherwise the number of entries filled in the
+ *   @p stats array.
+ */
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+                         struct rte_vdpa_stat *stats, unsigned int n);
+
+/**
+ * Reset virtq statistics.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] qid
+ *   The virtq index.
+ *
+ * @return
+ *   A negative value on error, otherwise 0.
+ */
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
index bd48460b5bedbbee1ea4aadfe79c95edc6061c26..e99d82c3579b5616d8ced80d921a8352b28b3593 100644 (file)
@@ -72,6 +72,11 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
                        rte_free(virtq->umems[i].buf);
        }
        memset(&virtq->umems, 0, sizeof(virtq->umems));
+       if (virtq->counters) {
+               claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
+               virtq->counters = NULL;
+       }
+       memset(&virtq->reset, 0, sizeof(virtq->reset));
        if (virtq->eqp.fw_qp)
                mlx5_vdpa_event_qp_destroy(&virtq->eqp);
        return 0;
@@ -205,6 +210,16 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
                DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
                        " need event QPs and event mechanism.", index);
        }
+       if (priv->caps.queue_counters_valid) {
+               virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
+                                                                   (priv->ctx);
+               if (!virtq->counters) {
+                       DRV_LOG(ERR, "Failed to create virtq couners for virtq"
+                               " %d.", index);
+                       goto error;
+               }
+               attr.counters_obj_id = virtq->counters->id;
+       }
        /* Setup 3 UMEMs for each virtq. */
        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
@@ -455,3 +470,82 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
        }
        return 0;
 }
+
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+                         struct rte_vdpa_stat *stats, unsigned int n)
+{
+       struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+       struct mlx5_devx_virtio_q_couners_attr attr = {0};
+       int ret;
+
+       if (!virtq->virtq || !virtq->enable) {
+               DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
+                       "is invalid.", qid);
+               return -EINVAL;
+       }
+       MLX5_ASSERT(virtq->counters);
+       ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
+               return ret;
+       }
+       ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
+       if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
+               return ret;
+       stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+               .value = attr.received_desc - virtq->reset.received_desc,
+       };
+       if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
+               return ret;
+       stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+               .value = attr.completed_desc - virtq->reset.completed_desc,
+       };
+       if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
+               return ret;
+       stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+               .value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+       };
+       if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
+               return ret;
+       stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+               .value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+       };
+       if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
+               return ret;
+       stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_INVALID_BUFFER,
+               .value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+       };
+       if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
+               return ret;
+       stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
+               .id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
+               .value = attr.error_cqes - virtq->reset.error_cqes,
+       };
+       return ret;
+}
+
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
+{
+       struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+       int ret;
+
+       if (!virtq->virtq || !virtq->enable) {
+               DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
+                       "is invalid.", qid);
+               return -EINVAL;
+       }
+       MLX5_ASSERT(virtq->counters);
+       ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
+                                                   &virtq->reset);
+       if (ret)
+               DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
+                       qid);
+       return ret;
+}