git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: remove Rx queue data list from device
Author:    Xueming Li <xuemingl@nvidia.com>
           Thu, 4 Nov 2021 12:33:18 +0000 (20:33 +0800)
Committer: Raslan Darawsheh <rasland@nvidia.com>
           Thu, 4 Nov 2021 21:55:49 +0000 (22:55 +0100)
The Rx queue data list (priv->rxqs) can be replaced by the Rx queue
list (priv->rxq_privs); remove it and replace all accesses with the
universal wrapper API.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
13 files changed:
drivers/net/mlx5/linux/mlx5_verbs.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rss.c
drivers/net/mlx5/mlx5_rx.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx_vec.c
drivers/net/mlx5/mlx5_stats.c
drivers/net/mlx5/mlx5_trigger.c

index 5d4ae3ea752e3cc2f5ba8605c79f6f2a84cdb163..f78916c868fc1f1666972e8f4f3b36b2542b4665 100644 (file)
@@ -486,11 +486,10 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 
        MLX5_ASSERT(ind_tbl);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                               container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
+                                                        ind_tbl->queues[i]);
 
-               wq[i] = rxq_ctrl->obj->wq;
+               wq[i] = rxq->ctrl->obj->wq;
        }
        MLX5_ASSERT(i > 0);
        /* Finalise indirection table. */
index 374cc9757aa5d7cf91a5003275a3c00a7ca25f5c..8614b8ffddd68e4e9f9ced5f696ae328a871358a 100644 (file)
@@ -1687,20 +1687,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /* Free the eCPRI flex parser resource. */
        mlx5_flex_parser_ecpri_release(dev);
        mlx5_flex_item_port_cleanup(dev);
-       if (priv->rxqs != NULL) {
+       if (priv->rxq_privs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
-               priv->rxqs = NULL;
-       }
-       if (priv->representor) {
-               /* Each representor has a dedicated interrupts handler */
-               mlx5_free(dev->intr_handle);
-               dev->intr_handle = NULL;
-       }
-       if (priv->rxq_privs != NULL) {
                mlx5_free(priv->rxq_privs);
                priv->rxq_privs = NULL;
        }
index 967d92b4ad6e67ee7f2aec3d2f189ee0002a1a38..a037a33debf8ce4978768194ba8eceec48d420b5 100644 (file)
@@ -1410,7 +1410,6 @@ struct mlx5_priv {
        unsigned int rxqs_n; /* RX queues array size. */
        unsigned int txqs_n; /* TX queues array size. */
        struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
-       struct mlx5_rxq_data *(*rxqs)[]; /* (Shared) RX queues. */
        struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
        struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
        struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
index b90a5d824580447564288d7e834f4198da644444..668d47025e8d2e59774570f4f61207b556354f63 100644 (file)
@@ -684,15 +684,17 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
 
        /* NULL queues designate drop queue. */
        if (ind_tbl->queues != NULL) {
-               struct mlx5_rxq_data *rxq_data =
-                                       (*priv->rxqs)[ind_tbl->queues[0]];
                struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-               rxq_obj_type = rxq_ctrl->type;
+                               mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]);
+               rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type :
+                                                 MLX5_RXQ_TYPE_STANDARD;
 
                /* Enable TIR LRO only if all the queues were configured for. */
                for (i = 0; i < ind_tbl->queues_n; ++i) {
-                       if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
+                       struct mlx5_rxq_data *rxq_i =
+                               mlx5_rxq_data_get(dev, ind_tbl->queues[i]);
+
+                       if (rxq_i != NULL && !rxq_i->lro) {
                                lro = false;
                                break;
                        }
index cde505955dfd4a8a18e88a873e2f6d6e5a159f89..bb38d5d2adee0dce15822a71ae93110b83cc95bf 100644 (file)
@@ -114,7 +114,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       priv->rxqs = (void *)dev->data->rx_queues;
        priv->txqs = (void *)dev->data->tx_queues;
        if (txqs_n != priv->txqs_n) {
                DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
@@ -171,11 +170,8 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
                return -rte_errno;
        }
        for (i = 0, j = 0; i < rxqs_n; i++) {
-               struct mlx5_rxq_data *rxq_data;
-               struct mlx5_rxq_ctrl *rxq_ctrl;
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
 
-               rxq_data = (*priv->rxqs)[i];
-               rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
                if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
                        rss_queue_arr[j++] = i;
        }
index 5435660a2dd78fef111e8be3673642618589d3af..2f30a3552581baf59a58b4997b5c8977ccec5546 100644 (file)
@@ -1210,10 +1210,11 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                return;
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                /*
                 * To support metadata register copy on Tx loopback,
                 * this must be always enabled (metadata may arive
@@ -1305,10 +1306,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
@@ -1369,18 +1371,16 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
                unsigned int j;
 
-               if (!(*priv->rxqs)[i])
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               rxq_ctrl = container_of((*priv->rxqs)[i],
-                                       struct mlx5_rxq_ctrl, rxq);
-               rxq_ctrl->flow_mark_n = 0;
-               rxq_ctrl->rxq.mark = 0;
+               rxq->ctrl->flow_mark_n = 0;
+               rxq->ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
-                       rxq_ctrl->flow_tunnels_n[j] = 0;
-               rxq_ctrl->rxq.tunnel = 0;
+                       rxq->ctrl->flow_tunnels_n[j] = 0;
+               rxq->ctrl->rxq.tunnel = 0;
        }
 }
 
@@ -1394,13 +1394,15 @@ void
 mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *data;
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[i])
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+               struct mlx5_rxq_data *data;
+
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               data = (*priv->rxqs)[i];
+               data = &rxq->ctrl->rxq;
                if (!rte_flow_dynf_metadata_avail()) {
                        data->dynf_meta = 0;
                        data->flow_meta_mask = 0;
@@ -1591,7 +1593,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue index out of range");
-       if (!(*priv->rxqs)[queue->index])
+       if (mlx5_rxq_get(dev, queue->index) == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
@@ -1622,7 +1624,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
  *   0 on success, a negative errno code on error.
  */
 static int
-mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
                         const uint16_t *queues, uint32_t queues_n,
                         const char **error, uint32_t *queue_idx)
 {
@@ -1631,20 +1633,19 @@ mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
        uint32_t i;
 
        for (i = 0; i != queues_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
+                                                                  queues[i]);
 
                if (queues[i] >= priv->rxqs_n) {
                        *error = "queue index out of range";
                        *queue_idx = i;
                        return -EINVAL;
                }
-               if (!(*priv->rxqs)[queues[i]]) {
+               if (rxq_ctrl == NULL) {
                        *error =  "queue is not configured";
                        *queue_idx = i;
                        return -EINVAL;
                }
-               rxq_ctrl = container_of((*priv->rxqs)[queues[i]],
-                                       struct mlx5_rxq_ctrl, rxq);
                if (i == 0)
                        rxq_type = rxq_ctrl->type;
                if (rxq_type != rxq_ctrl->type) {
index a04e22398dbd39796fd5d22929affc643996763f..75af05b7b02e045587225981532e7c3027570f28 100644 (file)
@@ -65,9 +65,11 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
        priv->rss_conf.rss_hf = rss_conf->rss_hf;
        /* Enable the RSS hash in all Rx queues. */
        for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[i])
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
+               rxq->ctrl->rxq.rss_hash = !!rss_conf->rss_hf &&
                        !!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
                ++idx;
        }
index d41905a2a04f180f0475fae513cff90ed8b7f1e6..1ffa1b95b88b90a5bacb6500751035a16bb27752 100644 (file)
@@ -148,10 +148,8 @@ void
 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                  struct rte_eth_rxq_info *qinfo)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
-       struct mlx5_rxq_ctrl *rxq_ctrl =
-               container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+       struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
+       struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
 
        if (!rxq)
                return;
@@ -162,7 +160,10 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        qinfo->conf.rx_thresh.wthresh = 0;
        qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
        qinfo->conf.rx_drop_en = 1;
-       qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+       if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
+               qinfo->conf.rx_deferred_start = 0;
+       else
+               qinfo->conf.rx_deferred_start = 1;
        qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
@@ -191,10 +192,8 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
                       struct rte_eth_burst_mode *mode)
 {
        eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *rxq;
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
 
-       rxq = (*priv->rxqs)[rx_queue_id];
        if (!rxq) {
                rte_errno = EINVAL;
                return -rte_errno;
index 337dcca59fb1d079c3199b242341a456281b1437..413e36f6d8ddaf99467c5b79203070acf7eb1e87 100644 (file)
@@ -603,14 +603,13 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
                return 0;
        /* All the configured queues should be enabled. */
        for (i = 0; i < priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-                       (rxq, struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
 
-               if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+               if (rxq_ctrl == NULL ||
+                   rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
                        continue;
                n_ibv++;
-               if (mlx5_rxq_mprq_enabled(rxq))
+               if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
                        ++n;
        }
        /* Multi-Packet RQ can't be partially configured. */
index 2850a220399083b60e1bf8dabcd419b2df6ef712..f3fc618ed2c2c69a5460bd71f95a2f1b209cfe81 100644 (file)
@@ -748,7 +748,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        }
        DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
                dev->data->port_id, idx);
-       (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+       dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
        return 0;
 }
 
@@ -830,7 +830,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
        }
        DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
                dev->data->port_id, idx);
-       (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+       dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
        return 0;
 }
 
@@ -1163,7 +1163,7 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)
        rte_mempool_free(mp);
        /* Unset mempool for each Rx queue. */
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+               struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
 
                if (rxq == NULL)
                        continue;
@@ -1204,12 +1204,13 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
                return 0;
        /* Count the total number of descriptors configured. */
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-                       (rxq, struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+               struct mlx5_rxq_data *rxq;
 
-               if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+               if (rxq_ctrl == NULL ||
+                   rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
                        continue;
+               rxq = &rxq_ctrl->rxq;
                n_ibv++;
                desc += 1 << rxq->elts_n;
                /* Get the max number of strides. */
@@ -1292,13 +1293,12 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 exit:
        /* Set mempool for each Rx queue. */
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-                       (rxq, struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
 
-               if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+               if (rxq_ctrl == NULL ||
+                   rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
                        continue;
-               rxq->mprq_mp = mp;
+               rxq_ctrl->rxq.mprq_mp = mp;
        }
        DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
                dev->data->port_id);
@@ -1777,8 +1777,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (priv->rxq_privs == NULL)
-               return NULL;
+       MLX5_ASSERT(priv->rxq_privs != NULL);
        return (*priv->rxq_privs)[idx];
 }
 
@@ -1862,7 +1861,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
                LIST_REMOVE(rxq, owner_entry);
                LIST_REMOVE(rxq_ctrl, next);
                mlx5_free(rxq_ctrl);
-               (*priv->rxqs)[idx] = NULL;
+               dev->data->rx_queues[idx] = NULL;
                mlx5_free(rxq);
                (*priv->rxq_privs)[idx] = NULL;
        }
@@ -1908,14 +1907,10 @@ enum mlx5_rxq_type
 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+       struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
-       if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
-               rxq_ctrl = container_of((*priv->rxqs)[idx],
-                                       struct mlx5_rxq_ctrl,
-                                       rxq);
+       if (idx < priv->rxqs_n && rxq_ctrl != NULL)
                return rxq_ctrl->type;
-       }
        return MLX5_RXQ_TYPE_UNDEFINED;
 }
 
@@ -2682,13 +2677,13 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
-       struct mlx5_rxq_data *data;
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[i])
+               struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
+
+               if (data == NULL)
                        continue;
-               data = (*priv->rxqs)[i];
                data->sh = sh;
                data->rt_timestamp = priv->config.rt_timestamp;
        }
index 511681841cae7153c986c00957961984d63e398f..6212ce8247dadb41bd3df4b4ae464964b5f522e6 100644 (file)
@@ -578,11 +578,11 @@ mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
                return -ENOTSUP;
        /* All the configured queues should support. */
        for (i = 0; i < priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+               struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);
 
-               if (!rxq)
+               if (!rxq_data)
                        continue;
-               if (mlx5_rxq_check_vec_support(rxq) < 0)
+               if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        break;
        }
        if (i != priv->rxqs_n)
index ae2f5668a74d3e041145763fc85edda4dca6f501..732775954ad34c0483094e1d9ac61036932cd148 100644 (file)
@@ -107,7 +107,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        memset(&tmp, 0, sizeof(tmp));
        /* Add software counters. */
        for (i = 0; (i != priv->rxqs_n); ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+               struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
 
                if (rxq == NULL)
                        continue;
@@ -181,10 +181,11 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
        unsigned int i;
 
        for (i = 0; (i != priv->rxqs_n); ++i) {
-               if ((*priv->rxqs)[i] == NULL)
+               struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);
+
+               if (rxq_data == NULL)
                        continue;
-               memset(&(*priv->rxqs)[i]->stats, 0,
-                      sizeof(struct mlx5_rxq_stats));
+               memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
        }
        for (i = 0; (i != priv->txqs_n); ++i) {
                if ((*priv->txqs)[i] == NULL)
index 2cf62a9780d8634c0b873e77162c5b7c842bce93..72475e4b5b50b74ae144bc2694ca7a1c22f309a7 100644 (file)
@@ -227,7 +227,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                if (!rxq_ctrl->obj) {
                        DRV_LOG(ERR,
                                "Port %u Rx queue %u can't allocate resources.",
-                               dev->data->port_id, (*priv->rxqs)[i]->idx);
+                               dev->data->port_id, i);
                        rte_errno = ENOMEM;
                        goto error;
                }