net/mlx5: modify hash Rx queue objects
author    Andrey Vesnovaty <andreyv@nvidia.com>
          Fri, 23 Oct 2020 10:24:08 +0000 (13:24 +0300)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 3 Nov 2020 22:35:02 +0000 (23:35 +0100)
Implement modification of the hash Rx queue object (see
mlx5_hrxq_modify()). The implementation relies on the capability to
modify a TIR object via the DevX API, i.e. it does not support Verbs
HW object operations. The ability to modify the hash Rx queue object
is a prerequisite for implementing rte_flow_shared_action_update()
for the shared RSS action in the mlx5 PMD.
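
For orientation, a minimal caller sketch (illustration only, not part
of this patch; the helper name and the conversion of RSS types to
Verbs hash fields are assumed): a shared RSS action update reduces to
a single mlx5_hrxq_modify() call on the stored hash Rx queue index.

    /* Hypothetical sketch: apply an updated RSS config to an hrxq. */
    static int
    shared_rss_update_sketch(struct rte_eth_dev *dev, uint32_t hrxq_idx,
                             const struct rte_flow_action_rss *conf,
                             uint64_t hash_fields)
    {
            /* The PMD key length is fixed to MLX5_RSS_HASH_KEY_LEN (40B). */
            if (conf->key_len != MLX5_RSS_HASH_KEY_LEN)
                    return -EINVAL;
            return mlx5_hrxq_modify(dev, hrxq_idx, conf->key,
                                    MLX5_RSS_HASH_KEY_LEN, hash_fields,
                                    conf->queue, conf->queue_num);
    }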

Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c9d5d71..944e177 100644
@@ -727,6 +727,7 @@ struct mlx5_ind_table_obj {
 struct mlx5_hrxq {
        ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
+       uint32_t shared:1; /* This object is used in a shared RSS action. */
        struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
        RTE_STD_C11
        union {
@@ -798,6 +799,10 @@ struct mlx5_obj_ops {
        void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
        int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                        int tunnel __rte_unused);
+       int (*hrxq_modify)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+                          const uint8_t *rss_key,
+                          uint64_t hash_fields,
+                          const struct mlx5_ind_table_obj *ind_tbl);
        void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
        int (*drop_action_create)(struct rte_eth_dev *dev);
        void (*drop_action_destroy)(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 0c99fe7..5fce4cd 100644
@@ -738,33 +738,37 @@ mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
 }
 
 /**
- * Create an Rx Hash queue.
+ * Set TIR attribute struct with relevant input values.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
- * @param hrxq
- *   Pointer to Rx Hash queue.
- * @param tunnel
+ * @param[in] rss_key
+ *   RSS key for the Rx hash queue.
+ * @param[in] hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param[in] ind_tbl
+ *   Indirection table for TIR.
+ * @param[in] tunnel
  *   Tunnel type.
+ * @param[out] tir_attr
+ *   Parameters structure for TIR creation/modification.
  *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
-                  int tunnel __rte_unused)
+static void
+mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+                      uint64_t hash_fields,
+                      const struct mlx5_ind_table_obj *ind_tbl,
+                      int tunnel, struct mlx5_devx_tir_attr *tir_attr)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-       struct mlx5_devx_tir_attr tir_attr;
-       const uint8_t *rss_key = hrxq->rss_key;
-       uint64_t hash_fields = hrxq->hash_fields;
+       enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
        bool lro = true;
        uint32_t i;
-       int err;
 
        /* Enable TIR LRO only if all the queues were configured for. */
        for (i = 0; i < ind_tbl->queues_n; ++i) {
@@ -773,26 +777,24 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                        break;
                }
        }
-       memset(&tir_attr, 0, sizeof(tir_attr));
-       tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
-       tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
-       tir_attr.tunneled_offload_en = !!tunnel;
+       memset(tir_attr, 0, sizeof(*tir_attr));
+       tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+       tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+       tir_attr->tunneled_offload_en = !!tunnel;
        /* If needed, translate hash_fields bitmap to PRM format. */
        if (hash_fields) {
-               struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
+               struct mlx5_rx_hash_field_select *rx_hash_field_select =
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-               rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
-                                      &tir_attr.rx_hash_field_selector_inner :
-                                      &tir_attr.rx_hash_field_selector_outer;
-#else
-               rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
+                       hash_fields & IBV_RX_HASH_INNER ?
+                               &tir_attr->rx_hash_field_selector_inner :
 #endif
+                               &tir_attr->rx_hash_field_selector_outer;
                /* 1 bit: 0: IPv4, 1: IPv6. */
                rx_hash_field_select->l3_prot_type =
                                        !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
                /* 1 bit: 0: TCP, 1: UDP. */
                rx_hash_field_select->l4_prot_type =
-                                        !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+                                       !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
                /* Bitmask which sets which fields to use in RX Hash. */
                rx_hash_field_select->selected_fields =
                        ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
@@ -804,20 +806,47 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                        (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
        }
-       if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
-               tir_attr.transport_domain = priv->sh->td->id;
+       if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
+               tir_attr->transport_domain = priv->sh->td->id;
        else
-               tir_attr.transport_domain = priv->sh->tdn;
-       memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-       tir_attr.indirect_table = ind_tbl->rqt->id;
+               tir_attr->transport_domain = priv->sh->tdn;
+       memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+       tir_attr->indirect_table = ind_tbl->rqt->id;
        if (dev->data->dev_conf.lpbk_mode)
-               tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+               tir_attr->self_lb_block =
+                                       MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
        if (lro) {
-               tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
-               tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-               tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-                                          MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+               tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
+               tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
+               tir_attr->lro_enable_mask =
+                               MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+                               MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
        }
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Pointer to Rx Hash queue.
+ * @param tunnel
+ *   Tunnel type.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+                  int tunnel __rte_unused)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_devx_tir_attr tir_attr = {0};
+       int err;
+
+       mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
+                              hrxq->ind_table, tunnel, &tir_attr);
        hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
        if (!hrxq->tir) {
                DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
@@ -854,6 +883,57 @@ mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
        claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
 }
 
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Hash Rx queue to modify.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param[in] ind_tbl
+ *   Indirection table for TIR.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+                      const uint8_t *rss_key,
+                      uint64_t hash_fields,
+                      const struct mlx5_ind_table_obj *ind_tbl)
+{
+       struct mlx5_devx_modify_tir_attr modify_tir = {0};
+
+       /*
+        * Untested modification fields:
+        * - rx_hash_symmetric is not set in hrxq_new(),
+        * - rx_hash_fn is hard-coded in hrxq_new(),
+        * - lro_xxx are not set after Rx queue setup.
+        */
+       if (ind_tbl != hrxq->ind_table)
+               modify_tir.modify_bitmask |=
+                       MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
+       if (hash_fields != hrxq->hash_fields ||
+                       memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
+               modify_tir.modify_bitmask |=
+                       MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
+       mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
+                              0, /* N/A - tunnel modification unsupported */
+                              &modify_tir.tir);
+       modify_tir.tirn = hrxq->tir->id;
+       if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
+               DRV_LOG(ERR, "port %u cannot modify DevX TIR",
+                       dev->data->port_id);
+               rte_errno = errno;
+               return -rte_errno;
+       }
+       return 0;
+}
+
 /**
  * Create a DevX drop action for Rx Hash queue.
  *
@@ -1364,6 +1444,7 @@ struct mlx5_obj_ops devx_obj_ops = {
        .ind_table_destroy = mlx5_devx_ind_table_destroy,
        .hrxq_new = mlx5_devx_hrxq_new,
        .hrxq_destroy = mlx5_devx_tir_destroy,
+       .hrxq_modify = mlx5_devx_hrxq_modify,
        .drop_action_create = mlx5_devx_drop_action_create,
        .drop_action_destroy = mlx5_devx_drop_action_destroy,
        .txq_obj_new = mlx5_txq_devx_obj_new,
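
For orientation, a worked instance of the hash-fields translation
performed in mlx5_devx_tir_attr_set() above (a sketch; it only uses
the PRM selector enums already referenced in this file). For an outer
IPv4/UDP 4-tuple, no MLX5_IPV6_IBV_RX_HASH bit is set, so
l3_prot_type = 0 (IPv4); the MLX5_UDP_IBV_RX_HASH bit is set, so
l4_prot_type = 1 (UDP); and selecting both addresses and both ports
sets all four SELECTED_FIELDS bits:

    /* Sketch: expected selector values for outer IPv4/UDP 4-tuple RSS. */
    uint8_t selected_fields =
            (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
            (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
            (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
            (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);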
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 15cd34e..2bac7da 100644
@@ -8425,20 +8425,16 @@ flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
        struct mlx5_hrxq *hrxq;
 
        MLX5_ASSERT(rss_desc->queue_num);
-       *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-                                 MLX5_RSS_HASH_KEY_LEN,
+       *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
                                  dev_flow->hash_fields,
-                                 rss_desc->queue,
-                                 rss_desc->queue_num);
+                                 rss_desc->queue, rss_desc->queue_num);
        if (!*hrxq_idx) {
                *hrxq_idx = mlx5_hrxq_new
-                               (dev, rss_desc->key,
-                                MLX5_RSS_HASH_KEY_LEN,
+                               (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
                                 dev_flow->hash_fields,
-                                rss_desc->queue,
-                                rss_desc->queue_num,
-                                !!(dh->layers &
-                                MLX5_FLOW_LAYER_TUNNEL));
+                                rss_desc->queue, rss_desc->queue_num,
+                                !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
+                                false);
                if (!*hrxq_idx)
                        return NULL;
        }
@@ -10026,7 +10022,8 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                 rss_desc->queue,
                                                 rss_desc->queue_num,
                                                 !!(dh->layers &
-                                                MLX5_FLOW_LAYER_TUNNEL));
+                                                MLX5_FLOW_LAYER_TUNNEL),
+                                                false);
                        }
                        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                              hrxq_idx);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 710622c..9cc4410 100644
@@ -1984,7 +1984,8 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                 rss_desc->queue,
                                                 rss_desc->queue_num,
                                                 !!(handle->layers &
-                                                MLX5_FLOW_LAYER_TUNNEL));
+                                                MLX5_FLOW_LAYER_TUNNEL),
+                                                false);
                        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                              hrxq_idx);
                        if (!hrxq) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 668caf8..fc7d097 100644
@@ -1731,6 +1731,29 @@ mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
        return MLX5_RXQ_TYPE_UNDEFINED;
 }
 
+/**
+ * Match the queues listed in the arguments against the queues of an
+ * indirection table object.
+ *
+ * @param ind_tbl
+ *   Pointer to the indirection table to match.
+ * @param queues
+ *   Queues to match against the queues in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   1 if all queues match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+                               const uint16_t *queues, uint32_t queues_n)
+{
+       return (ind_tbl->queues_n == queues_n) &&
+              (!memcmp(ind_tbl->queues, queues,
+                       ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
+}
+
 /**
  * Get an indirection table.
  *
@@ -1908,6 +1931,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                      hrxq, next) {
                struct mlx5_ind_table_obj *ind_tbl;
 
+               if (hrxq->shared)
+                       continue;
                if (hrxq->rss_key_len != rss_key_len)
                        continue;
                if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
@@ -1927,6 +1952,86 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index of the hash Rx queue to modify.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+                const uint8_t *rss_key, uint32_t rss_key_len,
+                uint64_t hash_fields,
+                const uint16_t *queues, uint32_t queues_n)
+{
+       int err;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq =
+               mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+       int ret;
+
+       if (!hrxq) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       /* Validations. */
+       if (hrxq->rss_key_len != rss_key_len) {
+               /* rss_key_len is fixed at 40 bytes and must not change. */
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       queues_n = hash_fields ? queues_n : 1;
+       if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
+                                           queues, queues_n)) {
+               ind_tbl = hrxq->ind_table;
+       } else {
+               ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+               if (!ind_tbl)
+                       ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+       }
+       if (!ind_tbl) {
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       MLX5_ASSERT(priv->obj_ops.hrxq_modify);
+       ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
+                                       hash_fields, ind_tbl);
+       if (ret) {
+               rte_errno = errno;
+               goto error;
+       }
+       if (ind_tbl != hrxq->ind_table) {
+               mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+               hrxq->ind_table = ind_tbl;
+       }
+       hrxq->hash_fields = hash_fields;
+       memcpy(hrxq->rss_key, rss_key, rss_key_len);
+       return 0;
+error:
+       err = rte_errno;
+       if (ind_tbl != hrxq->ind_table)
+               mlx5_ind_table_obj_release(dev, ind_tbl);
+       rte_errno = err;
+       return -rte_errno;
+}
+
 /**
  * Release the hash Rx queue.
  *
@@ -1980,6 +2085,8 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
+ * @param shared
+ *   If true, the hash Rx queue object is to be used in a shared RSS action.
  *
  * @return
  *   The DevX object initialized index, 0 otherwise and rte_errno is set.
@@ -1989,7 +2096,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n,
-             int tunnel __rte_unused)
+             int tunnel, bool shared)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq = NULL;
@@ -2008,6 +2115,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
        if (!hrxq)
                goto error;
+       hrxq->shared = !!shared;
        hrxq->ind_table = ind_tbl;
        hrxq->rss_key_len = rss_key_len;
        hrxq->hash_fields = hash_fields;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7e642ea..2ca0a47 100644
@@ -341,7 +341,7 @@ uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
                       const uint8_t *rss_key, uint32_t rss_key_len,
                       uint64_t hash_fields,
                       const uint16_t *queues, uint32_t queues_n,
-                      int tunnel __rte_unused);
+                      int tunnel, bool shared);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
                       const uint8_t *rss_key, uint32_t rss_key_len,
                       uint64_t hash_fields,
@@ -354,7 +354,10 @@ void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
 void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
-
+int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+                    const uint8_t *rss_key, uint32_t rss_key_len,
+                    uint64_t hash_fields,
+                    const uint16_t *queues, uint32_t queues_n);
 
 /* mlx5_txq.c */
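
For context, a hedged usage sketch of the extended mlx5_hrxq_new()
signature (illustration only; the surrounding flow code is condensed
and the variable names are assumed):

    /* Sketch: create a hash Rx queue dedicated to a shared RSS action. */
    uint32_t hrxq_idx = mlx5_hrxq_new(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
                                      hash_fields, queues, queues_n,
                                      0 /* tunnel */, true /* shared */);

    if (!hrxq_idx)
            return -rte_errno;

Marking the object as shared keeps it out of mlx5_hrxq_get() lookups,
so regular flows never alias a hash Rx queue owned by a shared action.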