net/mlx5: support queue/RSS actions for external Rx queue
author Michael Baum <michaelba@nvidia.com>
Thu, 24 Feb 2022 23:25:11 +0000 (01:25 +0200)
committer Thomas Monjalon <thomas@monjalon.net>
Fri, 25 Feb 2022 16:33:31 +0000 (17:33 +0100)
Add support for queue/RSS actions on external Rx queues.
During indirection table creation, the queue index is taken from the
mapping array.

This feature supports neither LRO nor Hairpin.
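
A minimal usage sketch (port_id and hw_rq_id, the id of a DevX RQ
created outside the PMD, are assumed to exist; the map call and the
reserved index range come from rte_pmd_mlx5.h):

    #include <rte_flow.h>
    #include <rte_pmd_mlx5.h>

    /* Map the externally created RQ to a DPDK queue index taken
     * from the reserved external range.
     */
    uint16_t ext_idx = MLX5_EXTERNAL_RX_QUEUE_ID_MIN;
    int ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id, ext_idx,
                                                    hw_rq_id);
    if (ret < 0)
        return ret;

    /* The mapped index is now a valid target for the QUEUE action,
     * and likewise inside an RSS action queue list.
     */
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_action_queue queue = { .index = ext_idx };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error flow_err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                                            actions, &flow_err);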

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
doc/guides/nics/mlx5.rst
doc/guides/rel_notes/release_22_03.rst
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index e6f2ec2..c31a154 100644
@@ -38,6 +38,7 @@ Features
 - Multiple TX and RX queues.
 - Shared Rx queue.
 - Rx queue delay drop.
+- Support steering for external Rx queues created outside the PMD.
 - Support for scattered TX frames.
 - Advanced support for scattered Rx frames with tunable buffer attributes.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index 27d7aad..60e5b4f 100644
@@ -168,6 +168,7 @@ New Features
 
   * Supported ConnectX-7 capability to schedule traffic sending on timestamp.
   * Added WQE based hardware steering support with ``rte_flow_async`` API.
+  * Added steering for external Rx queues created outside the PMD.
   * Added GRE optional fields matching.
 
 * **Updated Wangxun ngbe driver.**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5ecca2d..74841ca 100644
@@ -1912,6 +1912,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
+       ret = mlx5_ext_rxq_verify(dev);
+       if (ret)
+               DRV_LOG(WARNING, "port %u some external Rx queues still remain",
+                       dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index bcd2358..af106bd 100644
@@ -580,13 +580,21 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
                return rqt_attr;
        }
        for (i = 0; i != queues_n; ++i) {
-               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
+               if (mlx5_is_external_rxq(dev, queues[i])) {
+                       struct mlx5_external_rxq *ext_rxq =
+                                       mlx5_ext_rxq_get(dev, queues[i]);
 
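+                       /* Use the HW RQ id the application mapped to this index. */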
-               MLX5_ASSERT(rxq != NULL);
-               if (rxq->ctrl->is_hairpin)
-                       rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
-               else
-                       rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+                       rqt_attr->rq_list[i] = ext_rxq->hw_id;
+               } else {
+                       struct mlx5_rxq_priv *rxq =
+                                       mlx5_rxq_get(dev, queues[i]);
+
+                       MLX5_ASSERT(rxq != NULL);
+                       if (rxq->ctrl->is_hairpin)
+                               rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
+                       else
+                               rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+               }
        }
        MLX5_ASSERT(i > 0);
        for (j = 0; i != rqt_n; ++j, ++i)
@@ -711,7 +719,13 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
        uint32_t i;
 
        /* NULL queues designate drop queue. */
-       if (ind_tbl->queues != NULL) {
+       if (ind_tbl->queues == NULL) {
+               is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
+       } else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
+               /* External RxQ supports neither Hairpin nor LRO. */
+               is_hairpin = false;
+               lro = false;
+       } else {
                is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
                /* Enable TIR LRO only if all the queues were configured for. */
                for (i = 0; i < ind_tbl->queues_n; ++i) {
@@ -723,8 +737,6 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
                                break;
                        }
                }
-       } else {
-               is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
        }
        memset(tir_attr, 0, sizeof(*tir_attr));
        tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ab48c52..a690e2d 100644
@@ -1743,6 +1743,12 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
+       if (attr->egress)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+                                         "queue action not supported for egress.");
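+       /*
+        * External RxQs are not counted in rxqs_n and have no ctrl
+        * object, so the regular queue checks below do not apply.
+        */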
+       if (mlx5_is_external_rxq(dev, queue->index))
+               return 0;
        if (!priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -1757,11 +1763,6 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue is not configured");
-       if (attr->egress)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
-                                         "queue action not supported for "
-                                         "egress");
        return 0;
 }
 
@@ -1776,7 +1777,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
  *   Size of the @p queues array.
  * @param[out] error
  *   On error, filled with a textual error description.
- * @param[out] queue
+ * @param[out] queue_idx
  *   On error, filled with an offending queue index in @p queues array.
  *
  * @return
@@ -1789,17 +1790,27 @@ mlx5_validate_rss_queues(struct rte_eth_dev *dev,
 {
        const struct mlx5_priv *priv = dev->data->dev_private;
        bool is_hairpin = false;
+       bool is_ext_rss = false;
        uint32_t i;
 
        for (i = 0; i != queues_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
-                                                                  queues[i]);
+               struct mlx5_rxq_ctrl *rxq_ctrl;
 
+               if (mlx5_is_external_rxq(dev, queues[i])) {
+                       is_ext_rss = true;
+                       continue;
+               }
+               if (is_ext_rss) {
+                       *error = "Combining external and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
                if (queues[i] >= priv->rxqs_n) {
                        *error = "queue index out of range";
                        *queue_idx = i;
                        return -EINVAL;
                }
+               rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
                if (rxq_ctrl == NULL) {
                        *error =  "queue is not configured";
                        *queue_idx = i;
@@ -1894,7 +1905,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
                                          " type not specified");
-       if (!priv->rxqs_n)
+       if (!priv->rxqs_n && priv->ext_rxqs == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No Rx queues configured");
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index aba05df..acebe33 100644
@@ -18,6 +18,7 @@
 
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
+#include "rte_pmd_mlx5.h"
 
 /* Support tunnel matching. */
 #define MLX5_FLOW_TUNNEL 10
@@ -217,8 +218,14 @@ uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_external_rxq *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
+                                          uint16_t idx);
+uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_external_rxq *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
+                                          uint16_t idx);
 int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int mlx5_ext_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
 struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
@@ -643,4 +650,27 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
        return n == n_ibv;
 }
 
+/**
+ * Check whether given RxQ is external.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queue_idx
+ *   Rx queue index.
+ *
+ * @return
+ *   True if the queue is an external RxQ, false otherwise.
+ */
+static __rte_always_inline bool
+mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_external_rxq *rxq;
+
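+       /* External queues occupy the top of the 16-bit index space. */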
+       if (!priv->ext_rxqs || queue_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
+               return false;
+       rxq = &priv->ext_rxqs[queue_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+       return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+}
+
 #endif /* RTE_PMD_MLX5_RX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 889428f..ff293d9 100644
@@ -2084,6 +2084,65 @@ mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
        return rxq == NULL ? NULL : &rxq->ctrl->rxq;
 }
 
+/**
+ * Increase an external Rx queue reference count.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   External Rx queue index.
+ *
+ * @return
+ *   A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_rxq *
+mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+
+       __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+       return rxq;
+}
+
+/**
+ * Decrease an external Rx queue reference count.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   External Rx queue index.
+ *
+ * @return
+ *   Updated reference count.
+ */
+uint32_t
+mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+
+       return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
+/**
+ * Get an external Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   External Rx queue index.
+ *
+ * @return
+ *   A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_rxq *
+mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
+       return &priv->ext_rxqs[idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+}
+
 /**
  * Release a Rx queue.
  *
@@ -2167,6 +2226,37 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Verify the external Rx Queue list is empty.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_external_rxq *rxq;
+       uint32_t i;
+       int ret = 0;
+
+       if (priv->ext_rxqs == NULL)
+               return 0;
+
+       for (i = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
+               rxq = mlx5_ext_rxq_get(dev, i);
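+               /* The id-mapping itself holds a single reference. */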
+               if (rxq->refcnt < 2)
+                       continue;
+               DRV_LOG(DEBUG, "Port %u external RxQ %u still referenced.",
+                       dev->data->port_id, i);
+               ++ret;
+       }
+       return ret;
+}
+
 /**
  * Check whether RxQ type is Hairpin.
  *
@@ -2182,8 +2272,11 @@ bool
 mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+       struct mlx5_rxq_ctrl *rxq_ctrl;
 
+       if (mlx5_is_external_rxq(dev, idx))
+               return false;
+       rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
        return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
 }
 
@@ -2358,9 +2451,16 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 
        if (ref_qs)
                for (i = 0; i != queues_n; ++i) {
-                       if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
-                               ret = -rte_errno;
-                               goto error;
+                       if (mlx5_is_external_rxq(dev, queues[i])) {
+                               if (mlx5_ext_rxq_ref(dev, queues[i]) == NULL) {
+                                       ret = -rte_errno;
+                                       goto error;
+                               }
+                       } else {
+                               if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+                                       ret = -rte_errno;
+                                       goto error;
+                               }
                        }
                }
        ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
@@ -2371,8 +2471,12 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 error:
        if (ref_qs) {
                err = rte_errno;
-               for (j = 0; j < i; j++)
-                       mlx5_rxq_deref(dev, queues[j]);
+               for (j = 0; j < i; j++) {
+                       if (mlx5_is_external_rxq(dev, queues[j]))
+                               mlx5_ext_rxq_deref(dev, queues[j]);
+                       else
+                               mlx5_rxq_deref(dev, queues[j]);
+               }
                rte_errno = err;
        }
        DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",