net/mlx5: register multiple pool for Rx queue
authorViacheslav Ovsiienko <viacheslavo@nvidia.com>
Mon, 26 Oct 2020 11:55:02 +0000 (11:55 +0000)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 3 Nov 2020 22:35:02 +0000 (23:35 +0100)
The buffer split feature for receiving packets was added to the mlx5
PMD. An Rx queue can now receive data into buffers belonging to
different pools, and the memory of all the involved pools must be
registered for DMA operations in order to allow the hardware
to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_trigger.c

index dbcf0aa..c308ecc 100644 (file)
@@ -536,6 +536,9 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                .ret = 0,
        };
 
+       DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+                      "having %u chunks.", dev->data->port_id,
+                      mp->name, mp->nb_mem_chunks);
        rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
        if (data.ret < 0 && rte_errno == ENXIO) {
                /* Mempool may have externally allocated memory. */
index 917b433..a7c4741 100644 (file)
@@ -145,18 +145,22 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                dev->data->port_id, priv->sh->device_attr.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-               struct rte_mempool *mp;
 
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempool. */
-                       mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-                            rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-                       DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-                               " having %u chunks.", dev->data->port_id,
-                               rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-                       mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+                       /* Pre-register Rx mempools. */
+                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+                                                 rxq_ctrl->rxq.mprq_mp);
+                       } else {
+                               uint32_t s;
+
+                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+                                       mlx5_mr_update_mp
+                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
+                                               rxq_ctrl->rxq.rxseg[s].mp);
+                       }
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;