net/mlx5: convert hrxq to indexed
author     Suanming Mou <suanmingm@mellanox.com>
           Thu, 16 Apr 2020 02:42:07 +0000 (10:42 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Tue, 21 Apr 2020 11:57:09 +0000 (13:57 +0200)
This commit converts hrxq objects to be allocated from an indexed pool and referenced by index.

Using a uint32_t index instead of a pointer saves 4 bytes of memory in the
flow handle. With millions of flows, this saves several megabytes of memory.
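To illustrate the idea outside the driver, below is a minimal, self-contained
sketch of the same technique: objects live in a growable pool and users store a
1-based uint32_t index (0 meaning "none") instead of a pointer. The toy_pool/
toy_hrxq names are hypothetical stand-ins for the mlx5 indexed pool helpers
(mlx5_ipool_zmalloc/mlx5_ipool_get/mlx5_ipool_free) used by this patch, not the
driver's actual implementation.

/* Minimal sketch (hypothetical names): objects are kept in a growable pool
 * and referenced by a 1-based uint32_t index; index 0 means "no object". */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_hrxq {
	uint32_t refcnt;
	uint64_t hash_fields;
};

struct toy_pool {
	struct toy_hrxq *objs;
	uint32_t n;
};

/* Allocate one zeroed object and return its 1-based index, 0 on failure. */
static uint32_t
toy_pool_zmalloc(struct toy_pool *p)
{
	struct toy_hrxq *objs;

	objs = realloc(p->objs, (p->n + 1) * sizeof(*objs));
	if (objs == NULL)
		return 0;
	p->objs = objs;
	p->objs[p->n] = (struct toy_hrxq){0};
	return ++p->n; /* index = position + 1, so 0 stays "invalid" */
}

/* Translate an index back into a pointer; NULL for index 0 or out of range. */
static struct toy_hrxq *
toy_pool_get(struct toy_pool *p, uint32_t idx)
{
	if (idx == 0 || idx > p->n)
		return NULL;
	return &p->objs[idx - 1];
}

/* Per-flow handle: a 4-byte index instead of an 8-byte pointer. */
struct toy_flow_handle {
	uint32_t hrxq; /* index into the hrxq pool, 0 = none */
};

int
main(void)
{
	struct toy_pool pool = {0};
	struct toy_flow_handle fh = {0};
	struct toy_hrxq *hrxq;

	fh.hrxq = toy_pool_zmalloc(&pool);
	hrxq = toy_pool_get(&pool, fh.hrxq);
	if (hrxq != NULL)
		hrxq->refcnt++;
	printf("handle stores index %u (%zu bytes vs %zu for a pointer)\n",
	       fh.hrxq, sizeof(fh.hrxq), sizeof(hrxq));
	free(pool.objs);
	return 0;
}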

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 663ef2e..1d38aa2 100644
@@ -254,6 +254,17 @@ static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
                .free = rte_free,
                .type = "mlx5_jump_ipool",
        },
+       {
+               .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 0,
+               .release_mem_en = 1,
+               .malloc = rte_malloc_socket,
+               .free = rte_free,
+               .type = "mlx5_hrxq_ipool",
+       },
 };
 
 
@@ -1392,16 +1403,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_rdma);
        if (priv->vmwa_context)
                mlx5_vlan_vmwa_exit(priv->vmwa_context);
-       if (priv->sh) {
-               /*
-                * Free the shared context in last turn, because the cleanup
-                * routines above may use some shared fields, like
-                * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
-                * ifindex if Netlink fails.
-                */
-               mlx5_free_shared_ibctx(priv->sh);
-               priv->sh = NULL;
-       }
        ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -1430,6 +1431,16 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
+       if (priv->sh) {
+               /*
+                * Free the shared context in last turn, because the cleanup
+                * routines above may use some shared fields, like
+                * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
+                * ifindex if Netlink fails.
+                */
+               mlx5_free_shared_ibctx(priv->sh);
+               priv->sh = NULL;
+       }
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
                uint16_t port_id;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 59421dc..42b4818 100644
@@ -50,6 +50,7 @@ enum mlx5_ipool_index {
        MLX5_IPOOL_TAG, /* Pool for tag resource. */
        MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
        MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+       MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
        MLX5_IPOOL_MAX,
 };
 
@@ -513,7 +514,7 @@ struct mlx5_priv {
        int flow_nested_idx; /* Intermediate device flow index, nested. */
        LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
        LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-       LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
+       uint32_t hrxqs; /* Verbs Hash Rx queues. */
        LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
        LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
        /* Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c37ef7a..26f8704 100644
@@ -509,7 +509,7 @@ struct mlx5_flow_handle {
        uint64_t act_flags;
        /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
        void *ib_flow; /**< Verbs flow pointer. */
-       struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+       uint32_t hrxq; /**< Hash Rx queue object index. */
        struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
        union {
                uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6069cf3..5e6143b 100644
@@ -8102,8 +8102,9 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                        if (dv->transfer) {
                                dv->actions[n++] = priv->sh->esw_drop_action;
                        } else {
-                               dh->hrxq = mlx5_hrxq_drop_new(dev);
-                               if (!dh->hrxq) {
+                               struct mlx5_hrxq *drop_hrxq;
+                               drop_hrxq = mlx5_hrxq_drop_new(dev);
+                               if (!drop_hrxq) {
                                        rte_flow_error_set
                                                (error, errno,
                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -8111,28 +8112,31 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                 "cannot get drop hash queue");
                                        goto error;
                                }
-                               dv->actions[n++] = dh->hrxq->action;
+                               dv->actions[n++] = drop_hrxq->action;
                        }
                } else if (dh->act_flags &
                           (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
                        struct mlx5_hrxq *hrxq;
+                       uint32_t hrxq_idx;
 
                        MLX5_ASSERT(flow->rss.queue);
-                       hrxq = mlx5_hrxq_get(dev, flow->rss.key,
-                                            MLX5_RSS_HASH_KEY_LEN,
-                                            dev_flow->hash_fields,
-                                            (*flow->rss.queue),
-                                            flow->rss.queue_num);
-                       if (!hrxq) {
-                               hrxq = mlx5_hrxq_new
-                                       (dev, flow->rss.key,
-                                        MLX5_RSS_HASH_KEY_LEN,
-                                        dev_flow->hash_fields,
-                                        (*flow->rss.queue),
-                                        flow->rss.queue_num,
-                                        !!(dh->layers &
-                                           MLX5_FLOW_LAYER_TUNNEL));
+                       hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+                                                MLX5_RSS_HASH_KEY_LEN,
+                                                dev_flow->hash_fields,
+                                                (*flow->rss.queue),
+                                                flow->rss.queue_num);
+                       if (!hrxq_idx) {
+                               hrxq_idx = mlx5_hrxq_new
+                                               (dev, flow->rss.key,
+                                               MLX5_RSS_HASH_KEY_LEN,
+                                               dev_flow->hash_fields,
+                                               (*flow->rss.queue),
+                                               flow->rss.queue_num,
+                                               !!(dh->layers &
+                                               MLX5_FLOW_LAYER_TUNNEL));
                        }
+                       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                                             hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
@@ -8140,8 +8144,8 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       dh->hrxq = hrxq;
-                       dv->actions[n++] = dh->hrxq->action;
+                       dh->hrxq = hrxq_idx;
+                       dv->actions[n++] = hrxq->action;
                }
                dh->ib_flow =
                        mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
@@ -8174,7 +8178,7 @@ error:
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dh->hrxq);
-                       dh->hrxq = NULL;
+                       dh->hrxq = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
@@ -8438,7 +8442,7 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dh->hrxq);
-                       dh->hrxq = NULL;
+                       dh->hrxq = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index ef4d7a3..aa55f4e 100644
@@ -1753,7 +1753,7 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, handle->hrxq);
-                       handle->hrxq = NULL;
+                       handle->hrxq = 0;
                }
                if (handle->vf_vlan.tag && handle->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
@@ -1807,6 +1807,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_handle *handle;
        struct mlx5_flow *dev_flow;
+       struct mlx5_hrxq *hrxq;
        int err;
        int idx;
 
@@ -1814,8 +1815,8 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
                handle = dev_flow->handle;
                if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
-                       handle->hrxq = mlx5_hrxq_drop_new(dev);
-                       if (!handle->hrxq) {
+                       hrxq = mlx5_hrxq_drop_new(dev);
+                       if (!hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1823,22 +1824,24 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                goto error;
                        }
                } else {
-                       struct mlx5_hrxq *hrxq;
+                       uint32_t hrxq_idx;
 
                        MLX5_ASSERT(flow->rss.queue);
-                       hrxq = mlx5_hrxq_get(dev, flow->rss.key,
+                       hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dev_flow->hash_fields,
                                             (*flow->rss.queue),
                                             flow->rss.queue_num);
-                       if (!hrxq)
-                               hrxq = mlx5_hrxq_new(dev, flow->rss.key,
+                       if (!hrxq_idx)
+                               hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
                                                MLX5_RSS_HASH_KEY_LEN,
                                                dev_flow->hash_fields,
                                                (*flow->rss.queue),
                                                flow->rss.queue_num,
                                                !!(handle->layers &
                                                MLX5_FLOW_LAYER_TUNNEL));
+                       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                                        hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
@@ -1846,9 +1849,10 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       handle->hrxq = hrxq;
+                       handle->hrxq = hrxq_idx;
                }
-               handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+               MLX5_ASSERT(hrxq);
+               handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
                                                     &dev_flow->verbs.attr);
                if (!handle->ib_flow) {
                        rte_flow_error_set(error, errno,
@@ -1877,7 +1881,7 @@ error:
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, handle->hrxq);
-                       handle->hrxq = NULL;
+                       handle->hrxq = 0;
                }
                if (handle->vf_vlan.tag && handle->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1cc9f1d..9bc7af6 100644
@@ -2389,9 +2389,9 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Tunnel type.
  *
  * @return
- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ *   Index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
  */
-struct mlx5_hrxq *
+uint32_t
 mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
@@ -2400,6 +2400,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
+       uint32_t hrxq_idx = 0;
        struct ibv_qp *qp = NULL;
        struct mlx5_ind_table_obj *ind_tbl;
        int err;
@@ -2419,7 +2420,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        }
        if (!ind_tbl) {
                rte_errno = ENOMEM;
-               return NULL;
+               return 0;
        }
        if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
@@ -2560,7 +2561,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        goto error;
                }
        }
-       hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+       hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
        if (!hrxq)
                goto error;
        hrxq->ind_table = ind_tbl;
@@ -2589,8 +2590,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        hrxq->hash_fields = hash_fields;
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
        rte_atomic32_inc(&hrxq->refcnt);
-       LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
-       return hrxq;
+       ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+                    hrxq, next);
+       return hrxq_idx;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_ind_table_obj_release(dev, ind_tbl);
@@ -2599,7 +2601,7 @@ error:
        else if (tir)
                claim_zero(mlx5_devx_cmd_destroy(tir));
        rte_errno = err; /* Restore rte_errno. */
-       return NULL;
+       return 0;
 }
 
 /**
@@ -2616,9 +2618,9 @@ error:
  *   Number of queues.
  *
  * @return
- *   An hash Rx queue on success.
+ *   A hash Rx queue index on success.
  */
-struct mlx5_hrxq *
+uint32_t
 mlx5_hrxq_get(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
@@ -2626,9 +2628,11 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
+       uint32_t idx;
 
        queues_n = hash_fields ? queues_n : 1;
-       LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+                     hrxq, next) {
                struct mlx5_ind_table_obj *ind_tbl;
 
                if (hrxq->rss_key_len != rss_key_len)
@@ -2645,9 +2649,9 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                        continue;
                }
                rte_atomic32_inc(&hrxq->refcnt);
-               return hrxq;
+               return idx;
        }
-       return NULL;
+       return 0;
 }
 
 /**
@@ -2656,14 +2660,20 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
  * @param dev
  *   Pointer to Ethernet device.
  * @param hrxq
- *   Pointer to Hash Rx queue to release.
+ *   Index to Hash Rx queue to release.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 int
-mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq;
+
+       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+       if (!hrxq)
+               return 0;
        if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
                mlx5_glue->destroy_flow_action(hrxq->action);
@@ -2673,8 +2683,9 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
                else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
                        claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
                mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-               LIST_REMOVE(hrxq, next);
-               rte_free(hrxq);
+               ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+                            hrxq_idx, hrxq, next);
+               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
                return 0;
        }
        claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
@@ -2695,9 +2706,11 @@ mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
+       uint32_t idx;
        int ret = 0;
 
-       LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+                     hrxq, next) {
                DRV_LOG(DEBUG,
                        "port %u hash Rx queue %p still referenced",
                        dev->data->port_id, (void *)hrxq);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 537d449..8695218 100644
@@ -231,7 +231,7 @@ struct mlx5_ind_table_obj {
 
 /* Hash Rx queue. */
 struct mlx5_hrxq {
-       LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+       ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
        RTE_STD_C11
@@ -406,16 +406,16 @@ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
-                               const uint8_t *rss_key, uint32_t rss_key_len,
-                               uint64_t hash_fields,
-                               const uint16_t *queues, uint32_t queues_n,
-                               int tunnel __rte_unused);
-struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
-                               const uint8_t *rss_key, uint32_t rss_key_len,
-                               uint64_t hash_fields,
-                               const uint16_t *queues, uint32_t queues_n);
-int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
+                      const uint8_t *rss_key, uint32_t rss_key_len,
+                      uint64_t hash_fields,
+                      const uint16_t *queues, uint32_t queues_n,
+                      int tunnel __rte_unused);
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+                      const uint8_t *rss_key, uint32_t rss_key_len,
+                      uint64_t hash_fields,
+                      const uint16_t *queues, uint32_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);